diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7993419e8..b48354f01 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,4 +7,4 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair +* @ebuchman @cmwaters @tychoish @williambanfield @creachadair diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b6729552d..3db35d523 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,25 +3,48 @@ updates: - package-ecosystem: github-actions directory: "/" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + - package-ecosystem: npm directory: "/docs" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 - reviewers: - - fadeev + + ################################### + ## + ## Update All Go Dependencies + - package-ecosystem: gomod directory: "/" schedule: interval: daily - time: "11:00" + target-branch: "master" open-pull-requests-limit: 10 - reviewers: - - melekes - - tessr labels: - T:dependencies + - S:automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "v0.34.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "v0.35.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge diff --git a/.github/mergify.yml b/.github/mergify.yml index df570504a..d49264597 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -6,7 +6,7 @@ pull_request_rules: actions: merge: method: squash - strict: true + strict: smart+fasttrack commit_message: title+body - name: backport patches to v0.34.x branch conditions: @@ -16,3 +16,12 @@ pull_request_rules: backport: branches: - v0.34.x + - name: backport patches to v0.35.x branch + conditions: + - base=master + - label=S:backport-to-v0.35.x + actions: + backport: + branches: + - v0.35.x + diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 6ac3a738e..1e2749b3c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -4,7 +4,6 @@ on: push: paths: - "**.go" - - "!test/" branches: - master - release/** @@ -13,7 +12,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Create a file with all the pkgs run: go list ./... 
> pkgs.txt - name: Split pkgs into 4 files @@ -47,8 +46,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" - - uses: actions/checkout@v2.3.4 + go-version: "1.17" + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -70,8 +69,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" - - uses: actions/checkout@v2.3.4 + go-version: "1.17" + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -86,10 +85,10 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.16 + go-version: "1.17" - name: test & coverage report creation run: | - cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out if: env.GIT_DIFF - uses: actions/upload-artifact@v2 with: @@ -100,7 +99,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -125,7 +124,7 @@ jobs: name: "${{ github.sha }}-03-coverage" if: env.GIT_DIFF - run: | - cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt + cat ./*profile.out | grep -v "mode: set" >> coverage.txt if: env.GIT_DIFF - uses: codecov/codecov-action@v2.1.0 with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index e773526fd..033a7c46f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Prepare id: prep run: | diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 9c3b74cda..910d9bc2c 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -17,15 +17,15 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 with: ref: 'v0.34.x' @@ -37,7 +37,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly + run: ./build/generator -g 2 -d networks/nightly - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml new file mode 100644 index 000000000..df4dfb039 --- /dev/null +++ b/.github/workflows/e2e-nightly-35x.yml @@ -0,0 +1,76 @@ +# Runs randomly generated E2E testnets nightly on v0.35.x. + +# !! If you change something in this file, you probably want +# to update the e2e-nightly-master workflow as well! 
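+# +# The `p2p` and `group` matrix dimensions below are combined by GitHub Actions, +# so this workflow fans out into 12 nightly jobs (3 p2p modes x 4 testnet groups).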
+ +name: e2e-nightly-35x +on: + workflow_dispatch: # allow running workflow manually + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + p2p: ['legacy', 'new', 'hybrid'] + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/checkout@v2.4.0 + with: + ref: 'v0.35.x' + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. + run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + + - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail-2: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x + SLACK_FOOTER: '' + + e2e-nightly-success: # may turn this off once they seem to pass consistently + needs: e2e-nightly-test + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':white_check_mark:' + SLACK_COLOR: good + SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 029fee6bb..8d55eb6ad 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -10,22 +10,21 @@ on: - cron: '0 2 * * *' jobs: - e2e-nightly-test-2: + e2e-nightly-test: # Run parallel jobs for the listed testnet groups (must match the # ./build/generator -g flag) strategy: fail-fast: false matrix: - p2p: ['legacy', 'new', 'hybrid'] - group: ['00', '01'] + group: ['00', '01', '02', '03'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Build working-directory: test/e2e @@ -35,14 +34,14 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + run: ./build/generator -g 4 -d networks/nightly/ - - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml e2e-nightly-fail-2: - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{
failure() }} runs-on: ubuntu-latest steps: @@ -58,7 +57,7 @@ jobs: SLACK_FOOTER: '' e2e-nightly-success: # may turn this off once they seem to pass consistently - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 5cc605ead..400741def 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -16,8 +16,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' - - uses: actions/checkout@v2.3.4 + go-version: '1.17' + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -33,10 +33,6 @@ jobs: - name: Run CI testnet working-directory: test/e2e - run: ./build/runner -f networks/ci.toml + run: ./run-multiple.sh networks/ci.toml if: "env.GIT_DIFF != ''" - - name: Emit logs on failure - if: ${{ failure() }} - working-directory: test/e2e - run: ./build/runner -f networks/ci.toml logs diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index c47dc4411..d38e1f785 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -15,32 +15,17 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Install go-fuzz working-directory: test/fuzz run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - - name: Fuzz mempool-v1 + - name: Fuzz mempool working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1 - continue-on-error: true - - - name: Fuzz mempool-v0 - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0 - continue-on-error: true - - - name: Fuzz p2p-addrbook - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook - continue-on-error: true - - - name: Fuzz p2p-pex - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool continue-on-error: true - name: Fuzz p2p-sc diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 0e358af6e..60b49443d 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index c97b22cd1..1a6380026 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 79cb3685b..b808d9f77 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -23,7 +23,7 @@ jobs: - uses: golangci/golangci-lint-action@v2.5.2 with: # Required: the version of golangci-lint is required and must be specified without patch 
version: we always use the latest patch version. - version: v1.38 + version: v1.42.1 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 953170c59..7cfc7c161 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 - name: Lint Code Base uses: docker://github/super-linter:v3 env: diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml deleted file mode 100644 index ee26bd111..000000000 --- a/.github/workflows/proto-docker.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Build & Push TM Proto Builder -on: - pull_request: - paths: - - "tools/proto/*" - push: - branches: - - master - paths: - - "tools/proto/*" - schedule: - # run this job once a month to recieve any go or buf updates - - cron: "* * 1 * *" - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.4 - - name: Prepare - id: prep - run: | - DOCKER_IMAGE=tendermintdev/docker-build-proto - VERSION=noop - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/* ]]; then - VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') - if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then - VERSION=latest - fi - fi - TAGS="${DOCKER_IMAGE}:${VERSION}" - echo ::set-output name=tags::${TAGS} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1.6.0 - - - name: Login to DockerHub - uses: docker/login-action@v1.10.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Publish to Docker Hub - uses: docker/build-push-action@v2.7.0 - with: - context: ./tools/proto - file: ./tools/proto/Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml deleted file mode 100644 index 43a1972ec..000000000 --- a/.github/workflows/proto.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Protobuf -# Protobuf runs buf (https://buf.build/) lint and check-breakage -# This workflow is only run when a .proto file has been modified -on: - workflow_dispatch: # allow running workflow manually - pull_request: - paths: - - "**.proto" -jobs: - proto-lint: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.3.4 - - name: lint - run: make proto-lint - proto-breakage: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.3.4 - - name: check-breakage - run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dd18e750b..54a6c010a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,13 +12,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: fetch-depth: 0 - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - name: Build uses: goreleaser/goreleaser-action@v2 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 14d531812..83f753a5e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,8 +17,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" - - uses: actions/checkout@v2.3.4 + go-version: 
"1.17" + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -49,8 +49,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" - - uses: actions/checkout@v2.3.4 + go-version: "1.17" + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -80,8 +80,8 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" - - uses: actions/checkout@v2.3.4 + go-version: "1.17" + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.gitignore b/.gitignore index 7f412d461..b753f0375 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ docs/_build docs/dist docs/node_modules/ docs/spec +docs/.vuepress/public/rpc index.html.md libs/pubsub/query/fuzz_test/output profile\.out @@ -46,3 +47,10 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +proto/tendermint/blocksync/types.proto +proto/tendermint/consensus/types.proto +proto/tendermint/mempool/*.proto +proto/tendermint/p2p/*.proto +proto/tendermint/statesync/*.proto +proto/tendermint/types/*.proto +proto/tendermint/version/*.proto diff --git a/.golangci.yml b/.golangci.yml index b62f926e2..e0f3fe163 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,12 +13,12 @@ linters: # - gochecknoinits # - gocognit - goconst - - gocritic + # - gocritic # - gocyclo # - godox - gofmt - goimports - - golint + - revive - gosec - gosimple - govet diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d4f3d278..d6d780cd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,160 +1,196 @@ # Changelog -Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint). +Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). -## v0.35 +## v0.35.0 -Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis, -@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua +November 4, 2021 -### BREAKING CHANGES - -- CLI/RPC/Config - - [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) - - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) - - [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes) - - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) - - [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) - - [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) - - [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes) - - [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters) - - [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106) - - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) - - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) - - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106) - - [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - - [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated. 
- - [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0 - - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - - [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) - - [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. - - [cli] \#6854 Remove deprecated snake case commands. (@tychoish) - -- Apps - - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface - - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`. - - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - - [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`. - - It is not required any longer to set ldflags to set version strings - - [abci/counter] \#6684 Delete counter example app - -- Go API - - [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) - - [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) - - [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) - - [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) - - [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) - - [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish) - - [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez) - - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) - - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker) - - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes) - - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker) - - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker) - - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) - - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters) - - [state] \#5864 Use an iterator when pruning state (@cmwaters) - - [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. - - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. - - [light] \#6054 Move `MaxRetryAttempt` option from client to provider. 
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) - [all] \#6077 Change spelling from British English to American (@cmwaters) - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub - Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2 - [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) - [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes) - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder - [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself and `TxInfo`. (@alexanderbez) - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) - [types] \#6627 Move `NodeKey` to types to make the type public. - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) - -- Data Storage - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters) - [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106) - -- Tooling - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106) - [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106) +Special thanks to external contributors on this release: @JayT106, +@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman, +@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua ### FEATURES +- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters) +- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish) +- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish). +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters). - [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam -- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) -- [pex] \#6305 v2 pex reactor with backwards compatability.
Introduces two new pex messages to +accommodate for the new p2p stack. Removes the notion of seeds and crawling. All peer exchange reactors behave the same. (@cmwaters) -- [crypto] \#6376 Enable sr25519 as a validator key type -- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez) +- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) +- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to accommodate for the new p2p stack. Removes the notion of seeds and crawling. All peer exchange reactors behave the same. (@cmwaters) +- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type +- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez) - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the `mempool.version` configuration, where `v1` is the default configuration. - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node. - Transactions are gossiped in FIFO order as they are in `v0`. -- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) -- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106) -- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106) -- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) - -### IMPROVEMENTS - -- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish) -- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) -- [types] \#6478 Add `block_id` to `newblock` event (@jeebster) -- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) -- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) -- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) -- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. -- [privval] \#5725 Add gRPC support to private validator. -- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) -- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes) -- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) -- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint.
(@marbar3778) -- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes) -- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes) -- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes) -- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes) -- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) -- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon) -- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778) - - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk. -- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes) -- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis) -- [state] \#6067 Batch save state data (@githubsands & @cmwaters) -- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778) -- [types] \#6120 use batch verification for verifying commits signatures. - - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature. -- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. -- [privval] \#6240 Add `context.Context` to privval interface. -- [rpc] \#6265 set cache control in http-rpc response header (@JayT106) -- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. -- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106) -- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm) -- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778) -- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm) -- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778) -- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters) -- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106) -- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs) -- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) -- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters) +- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) +- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) +- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) +- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. 
(@williambanfield) ### BUG FIXES -- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) -- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) -- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106) -- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) -- [blocksync] \#6590 Update the metrics during blocksync (@JayT106) +- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish). +- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield). +- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) + wait until peerUpdates channel is closed to close remaining peers (@williambanfield) +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) +- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) +- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) +- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) + +### BREAKING CHANGES + +- Go API + + - [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on + deprecated fundamentals. Downstream users should maintain this + library. (@tychoish) + - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to + `internal` to prevent consumption of these internal APIs by + external users. (@tychoish) + - [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) + - [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) + - [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) + - [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) + - [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) + - [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish) + - [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. 
`CheckTx` now requires a `Context` argument. (@alexanderbez) + - [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) + - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker) + - [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes) + - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker) + - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker) + - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) + - [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters) + - [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters) + - [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. + - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. + - [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider. + - `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) + - [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters) + - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub + - Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2 + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes) + - [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) + - [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder + - [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) + - [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. + Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself + and `TxInfo`. (@alexanderbez) + - [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) + - [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public.
+ - [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` + - [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) + +- CLI/RPC/Config + + - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) + - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) + - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) + - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) + - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) + - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) + - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) + - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) + - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) + - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) + - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) + - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) + - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. + - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 + - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. + - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) + - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) + - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir instead of the hardcoded PATH.
(@JayT106) + - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106) + +- Apps + + - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. + - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. + - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. + - It is not required any longer to set ldflags to set version strings + - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app + +- Data Storage + - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters) + - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) + - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106) + +### IMPROVEMENTS + +- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish) +- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) +- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster) +- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778) +- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) +- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) +- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. +- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator. 
+- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) +- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes) +- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) +- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) +- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes) +- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes) +- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes) +- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes) +- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) +- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon) +- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778) + - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk. +- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes) +- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis) +- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters) +- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778) +- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commit signatures. + - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature. +- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. +- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface. +- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106) +- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
+- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106) +- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm) +- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778) +- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm) +- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778) +- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters) +- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106) +- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs) +- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) +- [statesync] [\#6807](https://github.com/tendermint/tendermint/pull/6807) Implement P2P state provider as an alternative to RPC (@cmwaters) + +## v0.34.14 + +This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash. + +### FEATURES + +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters). +- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of non-deterministic app hash or when reverting an upgrade. @cmwaters + +### IMPROVEMENTS + +- [\#7104](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters) + +### BUG FIXES + +- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair). +- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish). ## v0.34.13 @@ -1954,7 +1990,7 @@ For more, see issues marked This release also includes a fix to prevent Tendermint from including the same piece of evidence in more than one block. This issue was reported by @chengwenxi in our -[bug bounty program](https://hackerone.com/tendermint). +[bug bounty program](https://hackerone.com/cosmos). ### BREAKING CHANGES: @@ -2447,7 +2483,7 @@ Special thanks to external contributors on this release: @james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu. Special thanks to @Slamper for a series of bug reports in our [bug bounty -program](https://hackerone.com/tendermint) which are fixed in this release. +program](https://hackerone.com/cosmos) which are fixed in this release. This release is primarily about adding Version fields to various data structures, optimizing consensus messages for signing and verification in diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index af278d3fb..cb1a79cac 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,26 +1,49 @@ # Unreleased Changes +Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
+ + ## vX.X -Special thanks to external contributors on this release: +Month, DD, YYYY -Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint). +Special thanks to external contributors on this release: ### BREAKING CHANGES - CLI/RPC/Config + - [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair) + - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish) + - [mempool] \#7171 Remove legacy mempool implementation. (@tychoish) + - Apps + - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository. + - P2P Protocol + - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) + - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish) + - Go API + - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair) + - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair) + - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish) + - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish) + - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish) + - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish) + + - Blockchain Protocol ### FEATURES +- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-deterministic app hash or reverting an upgrade. +- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) + ### IMPROVEMENTS ### BUG FIXES +- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 23bfafcdf..e4613f84e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,7 +109,7 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. -We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. +We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running, then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`. ### Visual Studio Code @@ -227,141 +227,6 @@ Fixes #nnnn Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! -### Release procedure - -#### A note about backport branches -Tendermint's `master` branch is under active development.
-Releases are specified using tags and are built from long-lived "backport" branches. -Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, -and the backport branches have names like `v0.34.x` or `v0.33.x` -(literally, `x`; it is not a placeholder in this case). - -As non-breaking changes land on `master`, they should also be backported (cherry-picked) -to these backport branches. - -We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport -to the needed branch. There should be a label for any backport branch that you'll be targeting. -To notify the bot to backport a pull request, mark the pull request with -the label `S:backport-to-`. -Once the original pull request is merged, the bot will try to cherry-pick the pull request -to the backport branch. If the bot fails to backport, it will open a pull request. -The author of the original pull request is responsible for solving the conflicts and -merging the pull request. - -#### Creating a backport branch -If this is the first release candidate for a major release, you get to have the honor of creating -the backport branch! - -Note that, after creating the backport branch, you'll also need to update the tags on `master` -so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag -that is "greater than" the backport branches tags. See #6072 for more context. - -In the following example, we'll assume that we're making a backport branch for -the 0.35.x line. - -1. Start on `master` -2. Create the backport branch: - `git checkout -b v0.35.x` -3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: - `git tag -a v0.36.0-dev; git push v0.36.0-dev` -4. Create a new workflow to run the e2e nightlies for this backport branch. - (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml - for an example.) - -#### Release candidates - -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to -create RCs, and we build them off of backport branches. - -Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end -(for example, `v0.35.0-rc0`). - -(Note that branches and tags _cannot_ have the same names, so it's important that these branches -have distinct names from the tags/release names.) - -If this is the first RC for a major release, you'll have to make a new backport branch (see above). -Otherwise: - -1. Start from the backport branch (e.g. `v0.35.x`). -1. Run the integration tests and the e2e nightlies - (which can be triggered from the Github UI; - e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). -1. Prepare the changelog: - - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -1. Open a PR with these changes against the backport branch. -1. Once these changes have landed on the backport branch, be sure to pull them back down locally. -2. 
Once you have the changes locally, create the new tag, specifying a name and a tag "message": - `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0` -3. Push the tag back up to origin: - `git push origin v0.35.0-rc0` - Now the tag should be available on the repo's releases page. -4. Future RCs will continue to be built off of this branch. - -Note that this process should only be used for "true" RCs-- -release candidates that, if successful, will be the next release. -For more experimental "RCs," create a new, short-lived branch and tag that instead. - -#### Major release - -This major release process assumes that this release was preceded by release candidates. -If there were no release candidates, begin by creating a backport branch, as described above. - -1. Start on the backport branch (e.g. `v0.35.x`) -2. Run integration tests and the e2e nightlies. -3. Prepare the release: - - "Squash" changes from the changelog entries for the RCs into a single entry, - and add all changes included in `CHANGELOG_PENDING.md`. - (Squashing includes both combining all entries, as well as removing or simplifying - any intra-RC changes. It may also help to alphabetize the entries by package name.) - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -4. Open a PR with these changes against the backport branch. -5. Once these changes are on the backport branch, push a tag with prepared release details. - This will trigger the actual release `v0.35.0`. - - `git tag -a v0.35.0 -m 'Release v0.35.0'` - - `git push origin v0.35.0` -7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. - -#### Minor release (point releases) - -Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. -As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. - -Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. - -To create a minor release: - -1. Checkout the long-lived backport branch: `git checkout v0.35.x` -2. Run integration tests (`make test_integrations`) and the nightlies. -3. Check out a new branch and prepare the release: - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump the ABCI version number, if necessary. - (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) -4. Open a PR with these changes that will land them back on `v0.35.x` -5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - - `git tag -a v0.35.1 -m 'Release v0.35.1'` - - `git push origin v0.35.1` -6. 
Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. - - Do not merge the backport branch into master. - ## Testing ### Unit tests diff --git a/Makefile b/Makefile index a509f3a26..d091b9d48 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,8 @@ endif LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" -HTTPS_GIT := https://github.com/tendermint/tendermint.git -DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf +BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto +DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE) CGO_ENABLED ?= 0 # handle nostrip @@ -79,32 +79,17 @@ $(BUILDDIR)/: ### Protobuf ### ############################################################################### -proto-all: proto-gen proto-lint proto-check-breaking -.PHONY: proto-all - proto-gen: @docker pull -q tendermintdev/docker-build-proto @echo "Generating Protobuf files" - @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh + @$(DOCKER_PROTO_BUILDER) sh ./scripts/protocgen.sh .PHONY: proto-gen -proto-lint: - @$(DOCKER_BUF) check lint --error-format=json -.PHONY: proto-lint - proto-format: @echo "Formatting Protobuf files" - docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; + @$(DOCKER_PROTO_BUILDER) find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; .PHONY: proto-format -proto-check-breaking: - @$(DOCKER_BUF) check breaking --against-input .git#branch=master -.PHONY: proto-check-breaking - -proto-check-breaking-ci: - @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master -.PHONY: proto-check-breaking-ci - ############################################################################### ### Build ABCI ### ############################################################################### @@ -131,11 +116,11 @@ generate_test_cert: # generate server cerificate @certstrap request-cert -cn server -ip 127.0.0.1 # self-sign server cerificate with rootCA - @certstrap sign server --CA "root CA" + @certstrap sign server --CA "root CA" # generate client cerificate @certstrap request-cert -cn client -ip 127.0.0.1 # self-sign client cerificate with rootCA - @certstrap sign client --CA "root CA" + @certstrap sign client --CA "root CA" .PHONY: generate_test_cert ############################################################################### @@ -214,7 +199,7 @@ DESTINATION = ./index.html.md build-docs: @cd docs && \ while read -r branch path_prefix; do \ - (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + (git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ @@ -227,13 +212,13 @@ build-docs: build-docker: build-linux cp $(BUILDDIR)/tendermint DOCKER/tendermint - docker build --label=tendermint --tag="tendermint/tendermint" DOCKER + docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile . 
	rm -rf DOCKER/tendermint
.PHONY: build-docker

###############################################################################
-### Mocks ###
+### Mocks ###
###############################################################################

mockery:

diff --git a/README.md b/README.md
index 5d62a2f23..7823d45c1 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Or [Blockchain](), for short.
 [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
 [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
 [![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm)
-[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T)
+[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/cosmosnetwork)
 [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
 [![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
 [![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge)

@@ -33,12 +33,14 @@ Tendermint has been in the production of private and public environments, most n
 See below for more details about [versioning](#versioning).

 In any case, if you intend to run Tendermint in production, we're happy to help. You can
-contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
+contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
+
+More on how releases are conducted can be found [here](./RELEASES.md).

 ## Security

 To report a security vulnerability, see our [bug bounty
-program](https://hackerone.com/tendermint).
+program](https://hackerone.com/cosmos).
 For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).

 We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list

@@ -112,6 +114,8 @@ in [UPGRADING.md](./UPGRADING.md).

 ### Tendermint Core

+We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md).
+
 For details about the blockchain data structures and the p2p protocols, see the
 [Tendermint specification](https://docs.tendermint.com/master/spec/).

diff --git a/RELEASES.md b/RELEASES.md
new file mode 100644
index 000000000..a7f862e33
--- /dev/null
+++ b/RELEASES.md
@@ -0,0 +1,180 @@
+# Releases
+
+Tendermint uses [semantic versioning](https://semver.org/), with each release following
+a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
+advisable not to build against it.
+
+The latest changes are always initially merged into `master`.
+Releases are specified using tags and are built from long-lived "backport" branches
+that are cut from `master` when the release process begins.
+Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
+and the backport branches have names like `v0.34.x` or `v0.33.x`
+(literally, `x`; it is not a placeholder in this case).
+Tendermint only maintains the last two releases at a time (the oldest release
+predominantly receives just security patches).
+
+## Backporting
+
+As non-breaking changes land on `master`, they should also be backported
+to these backport branches.
+
+We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
+to the needed branch. There should be a label for any backport branch that you'll be targeting.
+To notify the bot to backport a pull request, mark the pull request with the label corresponding
+to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
+Once the original pull request is merged, the bot will try to cherry-pick the pull request
+to the backport branch. If the bot fails to backport, it will open a pull request.
+The author of the original pull request is responsible for solving the conflicts and
+merging the pull request.
+
+### Creating a backport branch
+
+If this is the first release candidate for a major release, you get to have the
+honor of creating the backport branch!
+
+Note that, after creating the backport branch, you'll also need to update the
+tags on `master` so that `go mod` is able to order the branches correctly. You
+should tag `master` with a "dev" tag that is "greater than" the backport
+branches' tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
+for more context.
+
+In the following example, we'll assume that we're making a backport branch for
+the 0.35.x line.
+
+1. Start on `master`.
+2. Create and push the backport branch:
+   ```sh
+   git checkout -b v0.35.x
+   git push origin v0.35.x
+   ```
+
+After doing these steps, go back to `master` and do the following:
+
+1. Tag `master` as the dev branch for the _next_ major release and push it back up.
+   For example:
+   ```sh
+   git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
+   git push origin v0.36.0-dev
+   ```
+
+2. Create a new workflow to run e2e nightlies for the new backport branch.
+   (See [e2e-nightly-master.yml][e2e] for an example.)
+
+3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
+   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
+   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
+
+4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
+   enable automatic update of Go dependencies on this branch. Copy and edit one
+   of the existing branch configurations to set the correct `target-branch`.
+
+[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml
+
+## Release candidates
+
+Before creating an official release, especially a major release, we may want to create a
+release candidate (RC) for our friends and partners to test out. We use git tags to
+create RCs, and we build them off of backport branches.
+
+Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
+(for example, `v0.35.0-rc0`).
+
+(Note that branches and tags _cannot_ have the same names, so it's important that these branches
+have distinct names from the tags/release names.)
+
+If this is the first RC for a major release, you'll have to make a new backport branch (see above).
+Otherwise:
+
+1. Start from the backport branch (e.g. `v0.35.x`).
+2. Run the integration tests and the e2e nightlies
+   (which can be triggered from the GitHub UI;
+   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
+3. Prepare the changelog:
+   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
+     its own changelog section. These will be squashed when the final candidate is released.
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
+     all PRs.
+   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
+     or other upgrading flows.
+   - Bump the TMVersionDefault version in `version.go`.
+   - Bump the P2P and block protocol versions in `version.go`, if necessary.
+     Check the changelog for breaking changes in these components.
+   - Bump the ABCI protocol version in `version.go`, if necessary.
+4. Open a PR with these changes against the backport branch.
+5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
+6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
+   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
+7. Push the tag back up to origin:
+   `git push origin v0.35.0-rc0`
+   Now the tag should be available on the repo's releases page.
+8. Future RCs will continue to be built off of this branch.
+
+Note that this process should only be used for "true" RCs:
+release candidates that, if successful, will be the next release.
+For more experimental "RCs," create a new, short-lived branch and tag that instead.
+
+## Major release
+
+This major release process assumes that this release was preceded by release candidates.
+If there were no release candidates, begin by creating a backport branch, as described above.
+
+1. Start on the backport branch (e.g. `v0.35.x`).
+2. Run integration tests (`make test_integrations`) and the e2e nightlies.
+3. Prepare the release:
+   - "Squash" changes from the changelog entries for the RCs into a single entry,
+     and add all changes included in `CHANGELOG_PENDING.md`.
+     (Squashing includes both combining all entries, as well as removing or simplifying
+     any intra-RC changes. It may also help to alphabetize the entries by package name.)
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
+     all PRs.
+   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
+     or other upgrading flows.
+   - Bump the TMVersionDefault version in `version.go`.
+   - Bump the P2P and block protocol versions in `version.go`, if necessary.
+   - Bump the ABCI protocol version in `version.go`, if necessary.
+4. Open a PR with these changes against the backport branch.
+5. Once these changes are on the backport branch, push a tag with the prepared release details.
+   This will trigger the actual release `v0.35.0`.
+   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
+   - `git push origin v0.35.0`
+6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
+7. Add the release to the documentation site generator config (see
+   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
+   - Start on branch `master`.
+   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
+     ensure the newest release is the default for the landing page.
+   - Add a new entry to `themeConfig.versions` in
+     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
+     release in the dropdown versions menu.
+   - Commit these changes to `master` and backport them into the backport
+     branch for this release.
+
+## Minor release (point releases)
+
+Minor releases are done differently from major releases: they are built off of
+long-lived backport branches, rather than from `master`. As non-breaking changes
+land on `master`, they should also be backported into these backport branches.
+
+Minor releases don't have release candidates by default, although any tricky
+changes may merit a release candidate.
+
+To create a minor release:
+
+1. Check out the long-lived backport branch: `git checkout v0.35.x`
+2. Run integration tests (`make test_integrations`) and the nightlies.
+3. Check out a new branch and prepare the release:
+   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`.
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues.
+   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh `
+   - Reset the `CHANGELOG_PENDING.md`.
+   - Bump the TMVersionDefault in `version.go`.
+   - Bump the ABCI version number, if necessary.
+     (Note that ABCI follows semver, and that ABCI versions are the only versions
+     which can change during minor releases, and only field additions are valid minor changes.)
+4. Open a PR with these changes that will land them back on `v0.35.x`.
+5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
+   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
+   - `git push origin v0.35.1`
+6. Create a pull request back to `master` with the CHANGELOG & version changes from the latest release.
+   - Remove all `R:minor` labels from the pull requests that were included in the release.
+   - Do not merge the backport branch into `master`.

diff --git a/SECURITY.md b/SECURITY.md
index 57d13e565..133e993c4 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -4,7 +4,7 @@

 As part of our [Coordinated Vulnerability Disclosure
 Policy](https://tendermint.com/security), we operate a [bug
-bounty](https://hackerone.com/tendermint).
+bounty](https://hackerone.com/cosmos).
 See the policy for more details on submissions and rewards, and see
 "Example Vulnerabilities" (below) for examples of the kinds of bugs
 we're most interested in.

 ### Guidelines

@@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrade

 ## Scope

-The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
+The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

 * Any third-party services
 * Findings from physical testing, such as office access

diff --git a/UPGRADING.md b/UPGRADING.md
index 99efdf225..bd6411c6a 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -98,7 +98,7 @@ are:

 - `blockchain`
 - `evidence`

-Accordingly, the `node` package was changed to reduce access to
+Accordingly, the `node` package changed to reduce access to
 tendermint internals: applications that use tendermint as a library
 will need to change to accommodate these changes.
 Most notably:

@@ -109,6 +109,20 @@ will need to change to accommodate these changes. Most notably:
 longer exported and have been replaced with `node.New` and
 `node.NewDefault` which provide more functional interfaces.

+To access any of the functionality previously available via the
+`node.Node` type, use the `*local.Local` "RPC" client, which exposes
+the full RPC interface as direct function calls. Import the
+`github.com/tendermint/tendermint/rpc/client/local` package and pass
+the node service as in the following:
+
+```go
+	node := node.NewDefault() // construct the node object
+	// start and set up the node service
+
+	client := local.New(node.(local.NodeService))
+	// use the client to interact with the node
+```
+
 ### gRPC Support

 Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.

diff --git a/abci/README.md b/abci/README.md
index 4a953dab3..e2234f4d1 100644
--- a/abci/README.md
+++ b/abci/README.md
@@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

 A detailed description of the ABCI methods and message types is contained in:

 - [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
-- [A protobuf file](../proto/tendermint/abci/types.proto)
+- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto)
 - [A Go interface](./types/application.go)

 ## Protocol Buffers

diff --git a/abci/client/client.go b/abci/client/client.go
index b6d34e422..1f0017557 100644
--- a/abci/client/client.go
+++ b/abci/client/client.go
@@ -1,4 +1,4 @@
-package abcicli
+package abciclient

 import (
 	"context"
@@ -7,6 +7,7 @@ import (

 	"github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 )

@@ -68,12 +69,12 @@ type Client interface {

 // NewClient returns a new ABCI client of the specified transport type.
 // It returns an error if the transport is not "socket" or "grpc"
-func NewClient(addr, transport string, mustConnect bool) (client Client, err error) {
+func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) {
 	switch transport {
 	case "socket":
-		client = NewSocketClient(addr, mustConnect)
+		client = NewSocketClient(logger, addr, mustConnect)
 	case "grpc":
-		client = NewGRPCClient(addr, mustConnect)
+		client = NewGRPCClient(logger, addr, mustConnect)
 	default:
 		err = fmt.Errorf("unknown abci transport %s", transport)
 	}

@@ -87,7 +88,7 @@ type ReqRes struct {
 	*sync.WaitGroup
 	*types.Response // Not set atomically, so be sure to use WaitGroup.

-	mtx  tmsync.RWMutex
+	mtx  tmsync.Mutex
 	done bool                  // Gets set to true once *after* WaitGroup.Done().
 	cb   func(*types.Response) // A single callback that may be set.
 }

@@ -137,16 +138,16 @@ func (r *ReqRes) InvokeCallback() {
 //
 // ref: https://github.com/tendermint/tendermint/issues/5439
 func (r *ReqRes) GetCallback() func(*types.Response) {
-	r.mtx.RLock()
-	defer r.mtx.RUnlock()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
 	return r.cb
 }

 // SetDone marks the ReqRes object as done.
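+// The done flag is set exactly once, strictly after WaitGroup.Done(); the
+// Response field is not set atomically, so readers must synchronize through
+// the WaitGroup (see the field comments on ReqRes above).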
func (r *ReqRes) SetDone() { r.mtx.Lock() - defer r.mtx.Unlock() r.done = true + r.mtx.Unlock() } func waitGroup1() (wg *sync.WaitGroup) { diff --git a/abci/client/creators.go b/abci/client/creators.go new file mode 100644 index 000000000..7cabb2e43 --- /dev/null +++ b/abci/client/creators.go @@ -0,0 +1,36 @@ +package abciclient + +import ( + "fmt" + + "github.com/tendermint/tendermint/abci/types" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" +) + +// Creator creates new ABCI clients. +type Creator func(log.Logger) (Client, error) + +// NewLocalCreator returns a Creator for the given app, +// which will be running locally. +func NewLocalCreator(app types.Application) Creator { + mtx := new(tmsync.Mutex) + + return func(_ log.Logger) (Client, error) { + return NewLocalClient(mtx, app), nil + } +} + +// NewRemoteCreator returns a Creator for the given address (e.g. +// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you +// want the client to connect before reporting success. +func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator { + return func(log.Logger) (Client, error) { + remoteApp, err := NewClient(logger, addr, transport, mustConnect) + if err != nil { + return nil, fmt.Errorf("failed to connect to proxy: %w", err) + } + + return remoteApp, nil + } +} diff --git a/abci/client/doc.go b/abci/client/doc.go index eac40fe11..fd5a17075 100644 --- a/abci/client/doc.go +++ b/abci/client/doc.go @@ -1,4 +1,4 @@ -// Package abcicli provides an ABCI implementation in Go. +// Package abciclient provides an ABCI implementation in Go. // // There are 3 clients available: // 1. socket (unix or TCP) @@ -26,4 +26,4 @@ // // sync: waits for all Async calls to complete (essentially what Flush does in // the socket client) and calls Sync method. -package abcicli +package abciclient diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 31bd6fae1..ef88736ab 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -24,7 +25,7 @@ type grpcClient struct { conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx tmsync.RWMutex + mtx tmsync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks @@ -42,7 +43,7 @@ var _ Client = (*grpcClient)(nil) // which is expensive, but easy - if you want something better, use the socket // protocol! maybe one day, if people really want it, we use grpc streams, but // hopefully not :D -func NewGRPCClient(addr string, mustConnect bool) Client { +func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &grpcClient{ addr: addr, mustConnect: mustConnect, @@ -54,7 +55,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client { // gRPC calls while processing a slow callback at the channel head. 
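Taken together, the `Creator` type added in `creators.go` above and the context-aware `Start` seen throughout this patch replace the old construct/`SetLogger`/`Start`/`Stop` sequence. The following is a minimal sketch of the resulting call pattern, not part of the patch itself; it assumes the `kvstore` example application and `log.TestingLogger` that appear elsewhere in this diff:

```go
package main

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// A Creator defers construction until a logger is available; for an
	// in-process app the shared mutex is allocated inside NewLocalCreator.
	creator := abciclient.NewLocalCreator(kvstore.NewApplication())

	client, err := creator(log.TestingLogger())
	if err != nil {
		panic(err)
	}

	// Start now takes a context; cancelling the context (rather than
	// calling Stop) shuts the client down.
	if err := client.Start(ctx); err != nil {
		panic(err)
	}

	if _, err := client.EchoSync(ctx, "hello"); err != nil {
		panic(err)
	}
}
```

The point of the indirection is that code which constructs clients later, with its own logger, can be handed a `Creator` rather than a finished client.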
chReqRes: make(chan *ReqRes, 64), } - cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli) + cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli) return cli } @@ -62,7 +63,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func (cli *grpcClient) OnStart() error { +func (cli *grpcClient) OnStart(ctx context.Context) error { // This processes asynchronous request/response messages and dispatches // them to callbacks. go func() { @@ -149,8 +150,8 @@ func (cli *grpcClient) StopForError(err error) { } func (cli *grpcClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -158,8 +159,8 @@ func (cli *grpcClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *grpcClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() - defer cli.mtx.Unlock() cli.resCb = resCb + cli.mtx.Unlock() } //---------------------------------------- diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 69457b5b0..701108a3c 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" @@ -15,7 +15,7 @@ import ( type localClient struct { service.BaseService - mtx *tmsync.RWMutex + mtx *tmsync.Mutex types.Application Callback } @@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. -func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client { +func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { if mtx == nil { - mtx = &tmsync.RWMutex{} + mtx = new(tmsync.Mutex) } - cli := &localClient{ mtx: mtx, Application: app, } - cli.BaseService = *service.NewBaseService(nil, "localClient", cli) return cli } func (app *localClient) SetResponseCallback(cb Callback) { app.mtx.Lock() - defer app.mtx.Unlock() app.Callback = cb + app.mtx.Unlock() } // TODO: change types.Application to include Error()? 
@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err } func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return app.callback( @@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck } func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return app.callback( @@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon } func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return &res, nil @@ -249,8 +247,8 @@ func (app *localClient) QuerySync( ctx context.Context, req types.RequestQuery, ) (*types.ResponseQuery, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return &res, nil diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 6726ce95e..f0d82a50e 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -5,9 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" - - log "github.com/tendermint/tendermint/libs/log" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -20,15 +18,15 @@ type Client struct { } // ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -66,15 +64,15 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA } // BeginBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) { +func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -112,15 +110,15 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) 
CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -158,15 +156,15 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t } // CommitAsync provides a mock function with given fields: _a0 -func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -204,15 +202,15 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -250,15 +248,15 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) } // EchoAsync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { ret := _m.Called(ctx, msg) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok { r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -296,15 +294,15 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho } // EndBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) { +func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + 
r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -356,15 +354,15 @@ func (_m *Client) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -393,15 +391,15 @@ func (_m *Client) FlushSync(_a0 context.Context) error { } // InfoAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) { +func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -439,15 +437,15 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R } // InitChainAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) { +func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -499,15 +497,15 @@ func (_m *Client) IsRunning() bool { } // ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) { +func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -545,15 +543,15 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn } // LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, 
types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -591,15 +589,15 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo } // OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) { +func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -636,49 +634,16 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS return r0, r1 } -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - // QueryAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) { +func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -731,51 +696,18 @@ func (_m *Client) Quit() <-chan struct{} { return r0 } -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - 
return r0 -} - -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 3fef8540d..8dfee0c8d 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "bufio" @@ -13,7 +13,7 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/libs/timer" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -22,8 +22,6 @@ const ( // reqQueueSize is the max number of queued async requests. // (memory: 256MB max assuming 1MB transactions) reqQueueSize = 256 - // Don't wait longer than... - flushThrottleMS = 20 ) type reqResWithContext struct { @@ -40,10 +38,9 @@ type socketClient struct { mustConnect bool conn net.Conn - reqQueue chan *reqResWithContext - flushTimer *timer.ThrottleTimer + reqQueue chan *reqResWithContext - mtx tmsync.RWMutex + mtx tmsync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. @@ -54,23 +51,22 @@ var _ Client = (*socketClient)(nil) // NewSocketClient creates a new socket client, which connects to a given // address. If mustConnect is true, the client will return an error upon start // if it fails to connect. -func NewSocketClient(addr string, mustConnect bool) Client { +func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &socketClient{ reqQueue: make(chan *reqResWithContext, reqQueueSize), - flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS), mustConnect: mustConnect, addr: addr, reqSent: list.New(), resCb: nil, } - cli.BaseService = *service.NewBaseService(nil, "socketClient", cli) + cli.BaseService = *service.NewBaseService(logger, "socketClient", cli) return cli } // OnStart implements Service by connecting to the server and spawning reading // and writing goroutines. -func (cli *socketClient) OnStart() error { +func (cli *socketClient) OnStart(ctx context.Context) error { var ( err error conn net.Conn @@ -89,8 +85,8 @@ func (cli *socketClient) OnStart() error { } cli.conn = conn - go cli.sendRequestsRoutine(conn) - go cli.recvResponseRoutine(conn) + go cli.sendRequestsRoutine(ctx, conn) + go cli.recvResponseRoutine(ctx, conn) return nil } @@ -102,14 +98,13 @@ func (cli *socketClient) OnStop() { cli.conn.Close() } - cli.flushQueue() - cli.flushTimer.Stop() + cli.drainQueue() } // Error returns an error if the client was stopped abruptly. 
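+// cli.err is written by stopForError (called from the send and receive
+// goroutines), so Error takes the same mutex that guards those writes.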
func (cli *socketClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -125,48 +120,43 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) { //---------------------------------------- -func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { - w := bufio.NewWriter(conn) +func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) { + bw := bufio.NewWriter(conn) for { select { + case <-ctx.Done(): + return + case <-cli.Quit(): + return case reqres := <-cli.reqQueue: - // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) + if ctx.Err() != nil { + return + } if reqres.C.Err() != nil { cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) continue } - cli.willSendReq(reqres.R) - err := types.WriteMessage(reqres.R.Request, w) - if err != nil { + + if err := types.WriteMessage(reqres.R.Request, bw); err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } - - // If it's a flush request, flush the current buffer. - if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok { - err = w.Flush() - if err != nil { - cli.stopForError(fmt.Errorf("flush buffer: %w", err)) - return - } + if err := bw.Flush(); err != nil { + cli.stopForError(fmt.Errorf("flush buffer: %w", err)) + return } - case <-cli.flushTimer.Ch: // flush queue - select { - case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}: - default: - // Probably will fill the buffer, or retry later. - } - case <-cli.Quit(): - return } } } -func (cli *socketClient) recvResponseRoutine(conn io.Reader) { +func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) { r := bufio.NewReader(conn) for { + if ctx.Err() != nil { + return + } var res = &types.Response{} err := types.ReadMessage(r, res) if err != nil { @@ -492,14 +482,6 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s } } - // Maybe auto-flush, or unset auto-flush - switch req.Value.(type) { - case *types.Request_Flush: - cli.flushTimer.Unset() - default: - cli.flushTimer.Set() - } - return reqres, nil } @@ -537,7 +519,9 @@ func queueErr(e error) error { return fmt.Errorf("can't queue req: %w", e) } -func (cli *socketClient) flushQueue() { +// drainQueue marks as complete and discards all remaining pending requests +// from the queue. +func (cli *socketClient) drainQueue() { cli.mtx.Lock() defer cli.mtx.Unlock() @@ -547,14 +531,17 @@ func (cli *socketClient) flushQueue() { reqres.Done() } - // mark all queued messages as resolved -LOOP: + // Mark all queued messages as resolved. + // + // TODO(creachadair): We can't simply range the channel, because it is never + // closed, and the writer continues to add work. + // See https://github.com/tendermint/tendermint/issues/6996. 
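+	// Each iteration below performs a non-blocking receive: the default case
+	// fires as soon as reqQueue is momentarily empty, which is how the loop
+	// terminates even though the channel is never closed.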
for { select { case reqres := <-cli.reqQueue: reqres.R.Done() default: - break LOOP + return } } } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index d61d729e1..a3469ddd1 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -1,4 +1,4 @@ -package abcicli_test +package abciclient_test import ( "context" @@ -11,35 +11,28 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -var ctx = context.Background() - func TestProperSyncCalls(t *testing.T) { - app := slowApp{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - s, c := setupClientServer(t, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) + app := slowApp{} + logger := log.TestingLogger() + + _, c := setupClientServer(ctx, t, logger, app) resp := make(chan error, 1) go func() { // This is BeginBlockSync unrolled.... reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) assert.NoError(t, err) - err = c.FlushSync(context.Background()) + err = c.FlushSync(ctx) assert.NoError(t, err) res := reqres.Response.GetBeginBlock() assert.NotNil(t, res) @@ -55,64 +48,29 @@ func TestProperSyncCalls(t *testing.T) { } } -func TestHangingSyncCalls(t *testing.T) { - app := slowApp{} +func setupClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, +) (service.Service, abciclient.Client) { + t.Helper() - s, c := setupClientServer(t, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Log(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Log(err) - } - }) - - resp := make(chan error, 1) - go func() { - // Start BeginBlock and flush it - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) - assert.NoError(t, err) - flush, err := c.FlushAsync(ctx) - assert.NoError(t, err) - // wait 20 ms for all events to travel socket, but - // no response yet from server - time.Sleep(20 * time.Millisecond) - // kill the server, so the connections break - err = s.Stop() - assert.NoError(t, err) - - // wait for the response from BeginBlock - reqres.Wait() - flush.Wait() - resp <- c.Error() - }() - - select { - case <-time.After(time.Second): - require.Fail(t, "No response arrived") - case err, ok := <-resp: - require.True(t, ok, "Must not close channel") - assert.Error(t, err, "We should get EOF error") - } -} - -func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abcicli.Client) { // some port between 20k and 30k port := 20000 + rand.Int31()%10000 addr := fmt.Sprintf("localhost:%d", port) - s, err := server.NewServer(addr, "socket", app) - require.NoError(t, err) - err = s.Start() + s, err := server.NewServer(logger, addr, "socket", app) require.NoError(t, err) + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) - c := abcicli.NewSocketClient(addr, true) - err = c.Start() - require.NoError(t, err) + c := abciclient.NewSocketClient(logger, addr, true) + require.NoError(t, c.Start(ctx)) + t.Cleanup(c.Wait) + + require.True(t, s.IsRunning()) + require.True(t, c.IsRunning()) return s, c } diff --git 
a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b9af27e22..783c41dbb 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,20 +2,20 @@ package main import ( "bufio" - "context" "encoding/hex" "errors" "fmt" "io" "os" + "os/signal" "strings" + "syscall" "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" @@ -27,10 +27,8 @@ import ( // client is a global variable so it can be reused by the console var ( - client abcicli.Client + client abciclient.Client logger log.Logger - - ctx = context.Background() ) // flags @@ -67,12 +65,12 @@ var RootCmd = &cobra.Command{ if client == nil { var err error - client, err = abcicli.NewClient(flagAddress, flagAbci, false) + client, err = abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false) if err != nil { return err } - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { + + if err := client.Start(cmd.Context()); err != nil { return err } } @@ -292,23 +290,24 @@ func compose(fs []func() error) error { } func cmdTest(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() return compose( []func() error{ - func() error { return servertest.InitChain(client) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, + func() error { return servertest.InitChain(ctx, client) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, func() error { - return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, 
code.CodeTypeBadNonce, nil) + return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, }) } @@ -443,13 +442,15 @@ func cmdEcho(cmd *cobra.Command, args []string) error { if len(args) > 0 { msg = args[0] } - res, err := client.EchoSync(ctx, msg) + res, err := client.EchoSync(cmd.Context(), msg) if err != nil { return err } + printResponse(cmd, args, response{ Data: []byte(res.Message), }) + return nil } @@ -459,7 +460,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) + res, err := client.InfoSync(cmd.Context(), types.RequestInfo{Version: version}) if err != nil { return err } @@ -484,7 +485,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, err := client.DeliverTxSync(cmd.Context(), types.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -510,7 +511,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTxSync(cmd.Context(), types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -525,7 +526,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.CommitSync(ctx) + res, err := client.CommitSync(cmd.Context()) if err != nil { return err } @@ -550,7 +551,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(ctx, types.RequestQuery{ + resQuery, err := client.QuerySync(cmd.Context(), types.RequestQuery{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), @@ -586,25 +587,21 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { } // Start the listener - srv, err := server.NewServer(flagAddress, flagAbci, app) + srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app) if err != nil { return err } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { + + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + if err := srv.Start(ctx); err != nil { return err } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - if err := srv.Stop(); err != nil { - logger.Error("Error while stopping server", "err", err) - } - }) - // Run forever. 
- select {} + <-ctx.Done() + return nil } //-------------------------------------------------------------------------------- diff --git a/abci/example/example_test.go b/abci/example/example_test.go index fdfc5515e..80d5a3130 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" @@ -29,47 +29,48 @@ func init() { } func TestKVStore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fmt.Println("### Testing KVStore") - testStream(t, kvstore.NewApplication()) + testStream(ctx, t, kvstore.NewApplication()) } func TestBaseApp(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() fmt.Println("### Testing BaseApp") - testStream(t, types.NewBaseApplication()) + testStream(ctx, t, types.NewBaseApplication()) } func TestGRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fmt.Println("### Testing GRPC") - testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) + testGRPCSync(ctx, t, types.NewGRPCApplication(types.NewBaseApplication())) } -func testStream(t *testing.T, app types.Application) { +func testStream(ctx context.Context, t *testing.T, app types.Application) { + t.Helper() + const numDeliverTxs = 20000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - + logger := log.TestingLogger() // Start the listener - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - err := server.Start() + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + t.Cleanup(server.Wait) + err := server.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) // Connect to the socket - client := abcicli.NewSocketClient(socket, false) - client.SetLogger(log.TestingLogger().With("module", "abci-client")) - err = client.Start() + client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false) + t.Cleanup(client.Wait) + + err = client.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) done := make(chan struct{}) counter := 0 @@ -98,8 +99,6 @@ func testStream(t *testing.T, app types.Application) { } }) - ctx := context.Background() - // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request @@ -127,24 +126,20 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { +func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationServer) { numDeliverTxs := 2000 - socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) + socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - + logger := log.TestingLogger() // Start the listener - server := 
abciserver.NewGRPCServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) + + if err := server.Start(ctx); err != nil { t.Fatalf("Error starting GRPC server: %v", err.Error()) } - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { server.Wait() }) // Connect to the socket conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a52312a00..664e628b0 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -3,7 +3,7 @@ package kvstore import ( "context" "fmt" - "io/ioutil" + "os" "sort" "testing" @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -24,8 +24,6 @@ const ( testValue = "def" ) -var ctx = context.Background() - func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) @@ -74,7 +72,7 @@ func TestKVStoreKV(t *testing.T) { } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -90,7 +88,7 @@ func TestPersistentKVStoreKV(t *testing.T) { } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -122,7 +120,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { // add a validator, remove a validator, update a validator func TestValUpdates(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -229,103 +227,103 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeSocketClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } // Connect to the socket - client := abcicli.NewSocketClient(socket, false) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err = server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, 
false) + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeGRPCClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() gapp := types.NewGRPCApplication(app) - server := abciserver.NewGRPCServer(socket, gapp) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp) + + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } - client := abcicli.NewGRPCClient(socket, true) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err := server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) + + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } func TestClientServer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.TestingLogger() + // set up socket app kvstore := NewApplication() - client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") + client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket") require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); server.Wait() }) + t.Cleanup(func() { cancel(); client.Wait() }) - runClientTests(t, client) + runClientTests(ctx, t, client) // set up grpc app kvstore = NewApplication() - gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc") + gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc") require.NoError(t, err) - t.Cleanup(func() { - if err := gserver.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := gclient.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); gserver.Wait() }) + t.Cleanup(func() { cancel(); gclient.Wait() }) - runClientTests(t, gclient) + runClientTests(ctx, t, gclient) } -func runClientTests(t *testing.T, client abcicli.Client) { +func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) { // run some tests.... 
key := testKey value := key tx := []byte(key) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) } -func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { +func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0fcfcadf7..40451baa9 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) const ( @@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct { // validator set ValUpdates []types.ValidatorUpdate - valAddrToPubKeyMap map[string]pc.PublicKey + valAddrToPubKeyMap map[string]cryptoproto.PublicKey logger log.Logger } @@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication return &PersistentKVStoreApplication{ app: &Application{state: state}, - valAddrToPubKeyMap: make(map[string]pc.PublicKey), + valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey), logger: log.NewNopLogger(), } } @@ -194,8 +194,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida return } -func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte { - pk, err := cryptoenc.PubKeyFromProto(pubkey) +func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte { + pk, err := encoding.PubKeyFromProto(pubkey) if err != nil { panic(err) } @@ -243,7 +243,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // add, update, or remove a validator func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { - pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey) + pubkey, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) } diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 503f0b64f..78da22cdb 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -1,11 +1,13 @@ package server import ( + "context" "net" "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -22,7 +24,7 @@ type GRPCServer struct { } // NewGRPCServer returns a new gRPC ABCI server -func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service { +func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &GRPCServer{ proto: proto, @@ -30,12 +32,12 @@ func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Se listener: nil, app: app, } - 
s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } // OnStart starts the gRPC service. -func (s *GRPCServer) OnStart() error { +func (s *GRPCServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad02..2a6d50fd2 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -12,17 +12,18 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) { +func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) { var s service.Service var err error switch transport { case "socket": - s = NewSocketServer(protoAddr, app) + s = NewSocketServer(logger, protoAddr, app) case "grpc": - s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app)) + s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app)) default: err = fmt.Errorf("unknown server type %s", transport) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 543b444b1..29d912671 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -2,10 +2,10 @@ package server import ( "bufio" + "context" "fmt" "io" "net" - "os" "runtime" "github.com/tendermint/tendermint/abci/types" @@ -19,7 +19,6 @@ import ( type SocketServer struct { service.BaseService - isLoggerSet bool proto string addr string @@ -33,7 +32,7 @@ type SocketServer struct { app types.Application } -func NewSocketServer(protoAddr string, app types.Application) service.Service { +func NewSocketServer(logger tmlog.Logger, protoAddr string, app types.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &SocketServer{ proto: proto, @@ -42,23 +41,18 @@ func NewSocketServer(protoAddr string, app types.Application) service.Service { app: app, conns: make(map[int]net.Conn), } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } -func (s *SocketServer) SetLogger(l tmlog.Logger) { - s.BaseService.SetLogger(l) - s.isLoggerSet = true -} - -func (s *SocketServer) OnStart() error { +func (s *SocketServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } s.listener = ln - go s.acceptConnectionsRoutine() + go s.acceptConnectionsRoutine(ctx) return nil } @@ -70,6 +64,7 @@ func (s *SocketServer) OnStop() { s.connsMtx.Lock() defer s.connsMtx.Unlock() + for id, conn := range s.conns { delete(s.conns, id) if err := conn.Close(); err != nil { @@ -103,8 +98,13 @@ func (s *SocketServer) rmConn(connID int) error { return conn.Close() } -func (s *SocketServer) acceptConnectionsRoutine() { +func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) { for { + if ctx.Err() != nil { + return + + } + // Accept a connection s.Logger.Info("Waiting for new connection...") conn, err := s.listener.Accept() @@ -124,35 +124,46 @@ func (s *SocketServer) acceptConnectionsRoutine() { responses := make(chan *types.Response, 1000) // A channel to buffer responses // Read requests from conn and deal with them - go s.handleRequests(closeConn, conn, responses) + go s.handleRequests(ctx, closeConn, conn, responses) // Pull responses from 
'responses' and write them to conn. - go s.handleResponses(closeConn, conn, responses) + go s.handleResponses(ctx, closeConn, conn, responses) // Wait until signal to close connection - go s.waitForClose(closeConn, connID) + go s.waitForClose(ctx, closeConn, connID) } } -func (s *SocketServer) waitForClose(closeConn chan error, connID int) { - err := <-closeConn - switch { - case err == io.EOF: - s.Logger.Error("Connection was closed by client") - case err != nil: - s.Logger.Error("Connection error", "err", err) - default: - // never happens - s.Logger.Error("Connection was closed") - } +func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) { + defer func() { + // Close the connection + if err := s.rmConn(connID); err != nil { + s.Logger.Error("Error closing connection", "err", err) + } + }() - // Close the connection - if err := s.rmConn(connID); err != nil { - s.Logger.Error("Error closing connection", "err", err) + select { + case <-ctx.Done(): + return + case err := <-closeConn: + switch { + case err == io.EOF: + s.Logger.Error("Connection was closed by client") + case err != nil: + s.Logger.Error("Connection error", "err", err) + default: + // never happens + s.Logger.Error("Connection was closed") + } } } // Read requests from conn and deal with them -func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) { +func (s *SocketServer) handleRequests( + ctx context.Context, + closeConn chan error, + conn io.Reader, + responses chan<- *types.Response, +) { var count int var bufReader = bufio.NewReader(conn) @@ -164,15 +175,15 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] err := fmt.Errorf("recovered from panic: %v\n%s", r, buf) - if !s.isLoggerSet { - fmt.Fprintln(os.Stderr, err) - } closeConn <- err s.appMtx.Unlock() } }() for { + if ctx.Err() != nil { + return + } var req = &types.Request{} err := types.ReadMessage(bufReader, req) @@ -239,23 +250,21 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types } // Pull responses from 'responses' and write them to conn. 
-func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { - var count int - var bufWriter = bufio.NewWriter(conn) - for { - var res = <-responses - err := types.WriteMessage(res, bufWriter) - if err != nil { +func (s *SocketServer) handleResponses( + ctx context.Context, + closeConn chan error, + conn io.Writer, + responses <-chan *types.Response, +) { + bw := bufio.NewWriter(conn) + for res := range responses { + if err := types.WriteMessage(res, bw); err != nil { closeConn <- fmt.Errorf("error writing message: %w", err) return } - if _, ok := res.Value.(*types.Response_Flush); ok { - err = bufWriter.Flush() - if err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %w", err) - return - } + if err := bw.Flush(); err != nil { + closeConn <- fmt.Errorf("error flushing write buffer: %w", err) + return + } - count++ } } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 2ef64e66a..2dfa68c63 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -1,27 +1,35 @@ package tests import ( + "context" "testing" "github.com/stretchr/testify/assert" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/libs/log" ) func TestClientServerNoAddrPrefix(t *testing.T) { - addr := "localhost:26658" - transport := "socket" - app := kvstore.NewApplication() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - server, err := abciserver.NewServer(addr, transport, app) + const ( + addr = "localhost:26658" + transport = "socket" + ) + app := kvstore.NewApplication() + logger := log.TestingLogger() + + server, err := abciserver.NewServer(logger, addr, transport, app) assert.NoError(t, err, "expected no error on NewServer") - err = server.Start() + err = server.Start(ctx) assert.NoError(t, err, "expected no error on server.Start") - client, err := abciclient.NewClient(addr, transport, true) + client, err := abciclient.NewClient(logger, addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") - err = client.Start() + err = client.Start(ctx) assert.NoError(t, err, "expected no error on client.Start") } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 10d4a3e58..5062083f0 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -7,14 +7,12 @@ import ( "fmt" mrand "math/rand" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" ) -var ctx = context.Background() - -func InitChain(client abcicli.Client) error { +func InitChain(ctx context.Context, client abciclient.Client) error { total := 10 vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { @@ -34,7 +32,7 @@ func InitChain(client abcicli.Client) error { return nil } -func Commit(client abcicli.Client, hashExp []byte) error { +func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { res, err := client.CommitSync(ctx) data := res.Data if err != nil { @@ -51,7 +49,7 @@ func Commit(client abcicli.Client, hashExp []byte) error { return nil } -func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp 
[]byte) error { +func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { @@ -70,7 +68,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] return nil } -func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { +func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { diff --git a/abci/types/client.go b/abci/types/client.go new file mode 100644 index 000000000..ab1254f4c --- /dev/null +++ b/abci/types/client.go @@ -0,0 +1 @@ +package types diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index a0f746992..c188fc8f5 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -4,7 +4,7 @@ import ( fmt "fmt" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" ) @@ -12,7 +12,7 @@ import ( func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { pke := ed25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { return Ed25519ValidatorUpdate(pk, power) case secp256k1.KeyType: pke := secp256k1.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { } case sr25519.KeyType: pke := sr25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6b00c587a..6e4b53a1d 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1838,7 +1838,7 @@ type ResponseCheckTx struct { Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } diff --git a/buf.gen.yaml b/buf.gen.yaml deleted file mode 100644 index dc56781dd..000000000 --- a/buf.gen.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# The version of the generation template. -# Required. -# The only currently-valid value is v1beta1. -version: v1beta1 - -# The plugins to run. -plugins: - # The name of the plugin. - - name: gogofaster - # The the relative output directory. - out: proto - # Any options to provide to the plugin. 
- opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative diff --git a/buf.yaml b/buf.yaml deleted file mode 100644 index cc4aced57..000000000 --- a/buf.yaml +++ /dev/null @@ -1,16 +0,0 @@ -version: v1beta1 - -build: - roots: - - proto - - third_party/proto -lint: - use: - - BASIC - - FILE_LOWER_SNAKE_CASE - - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index 203b3df0d..cda123d7f 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -6,7 +6,6 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -78,7 +77,7 @@ func main() { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(*rootCA) + bs, err := os.ReadFile(*rootCA) if err != nil { fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err) os.Exit(1) diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index f99975a75..9d67cfe84 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -11,7 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -65,9 +64,9 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) dumpDebugData(outDir, conf, rpc) @@ -79,10 +78,10 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return nil } -func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { +func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() - tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp") if err != nil { logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err) return diff --git a/cmd/tendermint/commands/debug/io.go b/cmd/tendermint/commands/debug/io.go index dcfff50c8..bf904cf5c 100644 --- a/cmd/tendermint/commands/debug/io.go +++ b/cmd/tendermint/commands/debug/io.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -111,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) } diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index bef534152..fbb2dcebe 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -14,7 +13,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp 
"github.com/tendermint/tendermint/rpc/client/http" ) @@ -34,7 +33,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, } func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + pid, err := strconv.ParseInt(args[0], 10, 64) if err != nil { return err } @@ -50,13 +49,13 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. - tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp") if err != nil { return fmt.Errorf("failed to create temporary directory: %w", err) } @@ -92,7 +91,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } logger.Info("killing Tendermint process") - if err := killProc(pid, tmpDir); err != nil { + if err := killProc(int(pid), tmpDir); err != nil { return err } @@ -105,7 +104,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. @@ -128,7 +127,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the Tendermint process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. - p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 226bfadc7..6fa48df1d 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -3,13 +3,13 @@ package debug import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "os" "path" "path/filepath" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { // copyWAL copies the Tendermint node's WAL file. It returns an error if the // WAL file cannot be read or copied. 
-func copyWAL(conf *cfg.Config, dir string) error { +func copyWAL(conf *config.Config, dir string) error { walPath := conf.Consensus.WalFile() walFile := filepath.Base(walPath) @@ -73,10 +73,10 @@ func dumpProfile(dir, addr, profile string, debug int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index bc94f763b..02e400a0a 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -121,7 +121,9 @@ func initFilesWithConfig(config *cfg.Config) error { } // write config file - cfg.WriteConfigFile(config.RootDir, config) + if err := cfg.WriteConfigFile(config.RootDir, config); err != nil { + return err + } logger.Info("Generated config", "mode", config.Mode) return nil diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go index de31d33d4..3cd6ef572 100644 --- a/cmd/tendermint/commands/inspect.go +++ b/cmd/tendermint/commands/inspect.go @@ -8,12 +8,7 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer/sink" - "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/internal/inspect" ) // InspectCmd is the command for starting an inspect server. @@ -55,29 +50,10 @@ func runInspect(cmd *cobra.Command, args []string) error { cancel() }() - blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config}) + ins, err := inspect.NewFromConfig(logger, config) if err != nil { return err } - blockStore := store.NewBlockStore(blockStoreDB) - stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config}) - if err != nil { - if err := blockStoreDB.Close(); err != nil { - logger.Error("error closing block store db", "error", err) - } - return err - } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) - if err != nil { - return err - } - sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID) - if err != nil { - return err - } - stateStore := state.NewStore(stateDB) - - ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger) logger.Info("starting inspect server") if err := ins.Run(ctx); err != nil { diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index 5e7446e51..0e1894ccf 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -6,12 +6,13 @@ import ( "fmt" "net/http" "os" + "os/signal" "path/filepath" "strings" + "syscall" "time" "github.com/spf13/cobra" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" @@ -192,8 +193,12 @@ func runProxy(cmd *cobra.Command, args []string) error { p.Listener.Close() }) + // this might be redundant to the above, eventually. 
+ ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + logger.Info("Starting proxy...", "laddr", listenAddr) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 1dbce2f74..58f11657b 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -6,17 +6,17 @@ import ( "strings" "github.com/spf13/cobra" - tmdb "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/progressbar" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink/kv" - "github.com/tendermint/tendermint/state/indexer/sink/psql" - "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -29,11 +29,12 @@ var ReIndexEventCmd = &cobra.Command{ Use: "reindex-event", Short: "reindex events to the event store backends", Long: ` - reindex-event is an offline tooling to re-index block and tx events to the eventsinks, - you can run this command when the event store backend dropped/disconnected or you want to replace the backend. - The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the - default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits - either or both arguments. +reindex-event is an offline tool that re-indexes block and tx events to the event sinks; +you can run this command when the event store backend was dropped/disconnected or you want to +replace the backend. The default start-height is 0, meaning the tool will start +reindexing from the base block height (inclusive); and the default end-height is 0, meaning +the tool will reindex up to the latest block height (inclusive). Users can omit +either or both arguments. 
`, Example: ` tendermint reindex-event @@ -129,17 +130,17 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { } func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { - dbType := tmdb.BackendType(cfg.DBBackend) + dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore - blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get StateStore - stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } @@ -221,14 +222,15 @@ func checkValidHeight(bs state.BlockStore) error { } if startHeight < base { - return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base) + return fmt.Errorf("%s (requested start height: %d, base height: %d)", + coretypes.ErrHeightNotAvailable, startHeight, base) } height := bs.Height() if startHeight > height { return fmt.Errorf( - "%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height) + "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height) } if endHeight == 0 || endHeight > height { @@ -238,13 +240,13 @@ func checkValidHeight(bs state.BlockStore) error { if endHeight < base { return fmt.Errorf( - "%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base) + "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base) } if endHeight < startHeight { return fmt.Errorf( "%s (requested the end height: %d is less than the start height: %d)", - ctypes.ErrInvalidRequest, startHeight, endHeight) + coretypes.ErrInvalidRequest, startHeight, endHeight) } return nil diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 5d9459f5a..452a6b2a8 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -11,9 +11,9 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/mocks" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index e92274042..2cd4c966a 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -9,8 +9,8 @@ import ( var ReplayCmd = &cobra.Command{ Use: "replay", Short: "Replay messages from WAL", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, false) + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, false) }, } @@ -19,7 +19,7 @@ var ReplayCmd = &cobra.Command{ var ReplayConsoleCmd = &cobra.Command{ Use: "replay-console", Short: "Replay messages from WAL in a console", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) + RunE: func(cmd 
*cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, true) }, } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 8745e55d8..5f3e54700 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -37,7 +37,7 @@ var ResetPrivValidatorCmd = &cobra.Command{ // XXX: this is totally unsafe. // it's only suitable for testnets. func resetAll(cmd *cobra.Command, args []string) error { - return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(), + return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger) } @@ -49,12 +49,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) error { // ResetAll removes address book files plus all data, and resets the privValdiator data. // Exported so other CLI tools can use it. -func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error { - if keepAddrBook { - logger.Info("The address book remains intact") - } else { - removeAddrBook(addrBookFile, logger) - } +func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error { if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { @@ -87,11 +82,3 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err } return nil } - -func removeAddrBook(addrBookFile string, logger log.Logger) { - if err := os.Remove(addrBookFile); err == nil { - logger.Info("Removed existing address book", "file", addrBookFile) - } else if !os.IsNotExist(err) { - logger.Info("Error removing address book", "file", addrBookFile, "err", err) - } -} diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go new file mode 100644 index 000000000..5aff232be --- /dev/null +++ b/cmd/tendermint/commands/rollback.go @@ -0,0 +1,46 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state" +) + +var RollbackStateCmd = &cobra.Command{ + Use: "rollback", + Short: "rollback tendermint state by one height", + Long: ` +A state rollback is performed to recover from an incorrect application state transition, +when Tendermint has persisted an incorrect app hash and is thus unable to make +progress. Rollback overwrites a state at height n with the state at height n - 1. +The application should also roll back to height n - 1. No blocks are removed, so upon +restarting Tendermint the transactions in block n will be re-executed against the +application. +`, + RunE: func(cmd *cobra.Command, args []string) error { + height, hash, err := RollbackState(config) + if err != nil { + return fmt.Errorf("failed to rollback state: %w", err) + } + + fmt.Printf("Rolled back state to height %d and hash %v", height, hash) + return nil + }, +} + +// RollbackState takes the state at the current height n and overwrites it with the state +// at height n - 1. Note state here refers to tendermint state not application state. +// Returns the latest state height and app hash alongside an error if there was one. 
+func RollbackState(config *cfg.Config) (int64, []byte, error) { + // use the parsed config to load the block and state store + blockStore, stateStore, err := loadStateAndBlockStore(config) + if err != nil { + return -1, nil, err + } + + // rollback the last state + return state.Rollback(blockStore, stateStore) +} diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index cd4bc9f5f..6d5143f4e 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -167,5 +166,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index c174fd967..feffbc2d0 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -3,16 +3,15 @@ package commands import ( "bytes" "crypto/sha256" - "errors" - "flag" "fmt" "io" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" - tmos "github.com/tendermint/tendermint/libs/os" ) var ( @@ -35,22 +34,7 @@ func AddNodeFlags(cmd *cobra.Command) { "socket address to listen on for connections from external priv-validator process") // node flags - cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing") - // TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle - // This check was added to give users an upgrade prompt to use the new flag for syncing. - // - // The pflag package does not have a native way to print a depcrecation warning - // and return an error. This logic was added to print a deprecation message to the user - // and then crash if the user attempts to use the old --fast-sync flag. - fs := flag.NewFlagSet("", flag.ExitOnError) - fs.Func("fast-sync", "deprecated", - func(string) error { - return errors.New("--fast-sync has been deprecated, please use --blocksync.enable") - }) - cmd.Flags().AddGoFlagSet(fs) - - cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck cmd.Flags().BytesHexVar( &genesisHash, "genesis-hash", @@ -65,15 +49,11 @@ func AddNodeFlags(cmd *cobra.Command) { "proxy-app", config.ProxyApp, "proxy app address, or one of: 'kvstore',"+ - " 'persistent_kvstore' or 'noop' for local testing.") + " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") // rpc flags cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") - cmd.Flags().String( - "rpc.grpc-laddr", - config.RPC.GRPCListenAddress, - "GRPC listen address (BroadcastTx only). Port required") cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods") cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") @@ -84,8 +64,6 @@ func AddNodeFlags(cmd *cobra.Command) { "node listen address. 
(0.0.0.0:0 means any interface, any port)") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.unconditional-peer-ids", - config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") @@ -126,28 +104,22 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command { return err } - n, err := nodeProvider(config, logger) + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + n, err := nodeProvider(ctx, config, logger) if err != nil { return fmt.Errorf("failed to create node: %w", err) } - if err := n.Start(); err != nil { + if err := n.Start(ctx); err != nil { return fmt.Errorf("failed to start node: %w", err) } logger.Info("started node", "node", n.String()) - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - if n.IsRunning() { - if err := n.Stop(); err != nil { - logger.Error("unable to stop the node", "error", err) - } - } - }) - - // Run forever. - select {} + <-ctx.Done() + return nil }, } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index a7307b38f..95955dd9b 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -226,7 +226,6 @@ func testnetFiles(cmd *cobra.Command, args []string) error { for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - config.P2P.AddrBookStrict = false config.P2P.AllowDuplicateIP = true if populatePersistentPeers { persistentPeersWithoutSelf := make([]string, 0) @@ -240,7 +239,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error { } config.Moniker = moniker(i) - cfg.WriteConfigFile(nodeDir, config) + if err := cfg.WriteConfigFile(nodeDir, config); err != nil { + return err + } } fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index c006c297d..52a00e4c0 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -6,9 +6,9 @@ import ( cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" "github.com/tendermint/tendermint/cmd/tendermint/commands/debug" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/node" ) func main() { @@ -29,6 +29,7 @@ func main() { cmd.GenNodeKeyCmd, cmd.VersionCmd, cmd.InspectCmd, + cmd.RollbackStateCmd, cmd.MakeKeyMigrateCommand(), debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), @@ -42,12 +43,12 @@ func main() { // * Provide their own DB implementation // can copy this file and use something other than the // node.NewDefault function - nodeFunc := nm.NewDefault + nodeFunc := node.NewDefault // Create & start node rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir))) + cmd := cli.PrepareBaseCmd(rootCmd, "TM", 
os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir))) if err := cmd.Execute(); err != nil { panic(err) } diff --git a/config/config.go b/config/config.go index dfc4836da..3ee62ed88 100644 --- a/config/config.go +++ b/config/config.go @@ -4,7 +4,6 @@ import ( "encoding/hex" "errors" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -28,12 +27,6 @@ const ( ModeFull = "full" ModeValidator = "validator" ModeSeed = "seed" - - BlockSyncV0 = "v0" - BlockSyncV2 = "v2" - - MempoolV0 = "v0" - MempoolV1 = "v1" ) // NOTE: Most of the structs & relevant comments + the @@ -54,16 +47,14 @@ var ( defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" - defaultNodeKeyName = "node_key.json" - defaultAddrBookName = "addrbook.json" + defaultNodeKeyName = "node_key.json" defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName) defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName) - defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) - defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) + defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) ) // Config defines the top level configuration for a Tendermint node @@ -76,7 +67,6 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - BlockSync *BlockSyncConfig `mapstructure:"blocksync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -91,7 +81,6 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), StateSync: DefaultStateSyncConfig(), - BlockSync: DefaultBlockSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), @@ -114,7 +103,6 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), StateSync: TestStateSyncConfig(), - BlockSync: TestBlockSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), @@ -142,18 +130,12 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.RPC.ValidateBasic(); err != nil { return fmt.Errorf("error in [rpc] section: %w", err) } - if err := cfg.P2P.ValidateBasic(); err != nil { - return fmt.Errorf("error in [p2p] section: %w", err) - } if err := cfg.Mempool.ValidateBasic(); err != nil { return fmt.Errorf("error in [mempool] section: %w", err) } if err := cfg.StateSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [statesync] section: %w", err) } - if err := cfg.BlockSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [blocksync] section: %w", err) - } if err := cfg.Consensus.ValidateBasic(); err != nil { return fmt.Errorf("error in [consensus] section: %w", err) } @@ -283,7 +265,7 @@ func (cfg BaseConfig) NodeKeyFile() string { // LoadNodeKey loads NodeKey located in filePath. 
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) { - jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile()) + jsonBytes, err := os.ReadFile(cfg.NodeKeyFile()) if err != nil { return "", err } @@ -339,28 +321,6 @@ func (cfg BaseConfig) ValidateBasic() error { return fmt.Errorf("unknown mode: %v", cfg.Mode) } - // TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle. - // This check was added to give users an upgrade prompt to use the new - // configuration option in v0.35. In future release cycles they should no longer - // be using this configuration parameter so the check can be removed. - // The cfg.Other field can likely be removed at the same time if it is not referenced - // elsewhere as it was added to service this check. - if fs, ok := cfg.Other["fastsync"]; ok { - if _, ok := fs.(map[string]interface{}); ok { - return fmt.Errorf("a configuration section named 'fastsync' was found in the " + - "configuration file. The 'fastsync' section has been renamed to " + - "'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'") - } - } - if fs, ok := cfg.Other["fast-sync"]; ok { - if fs != "" { - return fmt.Errorf("a parameter named 'fast-sync' was found in the " + - "configuration file. The parameter to enable or disable quickly syncing with a blockchain" + - "has moved to the [blocksync] section of the configuration file as blocksync.enable. " + - "Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'") - } - } - return nil } @@ -461,24 +421,10 @@ type RPCConfig struct { // A list of non simple headers the client is allowed to use with cross-domain requests. CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"` - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCListenAddress string `mapstructure:"grpc-laddr"` - - // Maximum number of simultaneous connections. - // Does not include RPC (HTTP&WebSocket) connections. See max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"` - // Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool Unsafe bool `mapstructure:"unsafe"` // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. @@ -492,7 +438,7 @@ type RPCConfig struct { MaxSubscriptionClients int `mapstructure:"max-subscription-clients"` // Maximum number of unique queries a given client can /subscribe to - // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set + // If you're using a Local RPC client and /broadcast_tx_commit, set this // to the estimated maximum number of broadcast_tx_commit calls per block. 
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"` @@ -533,12 +479,10 @@ type RPCConfig struct { // DefaultRPCConfig returns a default configuration for the RPC server func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ - ListenAddress: "tcp://127.0.0.1:26657", - CORSAllowedOrigins: []string{}, - CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, - CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, - GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, + ListenAddress: "tcp://127.0.0.1:26657", + CORSAllowedOrigins: []string{}, + CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, + CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, Unsafe: false, MaxOpenConnections: 900, @@ -559,7 +503,6 @@ func DefaultRPCConfig() *RPCConfig { func TestRPCConfig() *RPCConfig { cfg := DefaultRPCConfig() cfg.ListenAddress = "tcp://127.0.0.1:36657" - cfg.GRPCListenAddress = "tcp://127.0.0.1:36658" cfg.Unsafe = true return cfg } @@ -567,9 +510,6 @@ func TestRPCConfig() *RPCConfig { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *RPCConfig) ValidateBasic() error { - if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc-max-open-connections can't be negative") - } if cfg.MaxOpenConnections < 0 { return errors.New("max-open-connections can't be negative") } @@ -647,25 +587,6 @@ type P2PConfig struct { //nolint: maligned // UPNP port forwarding UPNP bool `mapstructure:"upnp"` - // Path to address book - AddrBook string `mapstructure:"addr-book-file"` - - // Set true for strict address routability rules - // Set false for private or local networks - AddrBookStrict bool `mapstructure:"addr-book-strict"` - - // Maximum number of inbound peers - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"` - - // Maximum number of outbound peers to connect to, excluding persistent peers. - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"` - // MaxConnections defines the maximum number of connected peers (inbound and // outbound). MaxConnections uint16 `mapstructure:"max-connections"` @@ -674,11 +595,15 @@ type P2PConfig struct { //nolint: maligned // attempts per IP address. MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` - // List of node IDs, to which a connection will be (re)established ignoring any existing limits - UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"` + // Set true to enable the peer-exchange reactor + PexReactor bool `mapstructure:"pex"` - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"` + // Comma separated list of peer IDs to keep private (will not be gossiped to + // other peers) + PrivatePeerIDs string `mapstructure:"private-peer-ids"` + + // Toggle to disable guard against peers connecting from the same ip. 
+ AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` // Time to wait before flushing messages out on the connection FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` @@ -692,16 +617,6 @@ type P2PConfig struct { //nolint: maligned // Rate at which packets can be received, in bytes/second RecvRate int64 `mapstructure:"recv-rate"` - // Set true to enable the peer-exchange reactor - PexReactor bool `mapstructure:"pex"` - - // Comma separated list of peer IDs to keep private (will not be gossiped to - // other peers) - PrivatePeerIDs string `mapstructure:"private-peer-ids"` - - // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` - // Peer connection configuration. HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` @@ -710,13 +625,8 @@ type P2PConfig struct { //nolint: maligned // Force dial to fail TestDialFail bool `mapstructure:"test-dial-fail"` - // UseLegacy enables the "legacy" P2P implementation and - // disables the newer default implementation. This flag will - // be removed in a future release. - UseLegacy bool `mapstructure:"use-legacy"` - // Makes it possible to configure which queue backend the p2p - // layer uses. Options are: "fifo", "priority" and "wdrr", + // layer uses. Options are: "fifo" and "priority", // with the default being "priority". QueueType string `mapstructure:"queue-type"` } @@ -727,13 +637,8 @@ func DefaultP2PConfig() *P2PConfig { ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", UPNP: false, - AddrBook: defaultAddrBookPath, - AddrBookStrict: true, - MaxNumInboundPeers: 40, - MaxNumOutboundPeers: 10, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PersistentPeersMaxDialPeriod: 0 * time.Second, FlushThrottleTimeout: 100 * time.Millisecond, // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. // The IP header and the TCP header take up 20 bytes each at least (unless @@ -749,39 +654,15 @@ func DefaultP2PConfig() *P2PConfig { DialTimeout: 3 * time.Second, TestDialFail: false, QueueType: "priority", - UseLegacy: false, } } -// TestP2PConfig returns a configuration for testing the peer-to-peer layer -func TestP2PConfig() *P2PConfig { - cfg := DefaultP2PConfig() - cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.FlushThrottleTimeout = 10 * time.Millisecond - cfg.AllowDuplicateIP = true - return cfg -} - -// AddrBookFile returns the full path to the address book -func (cfg *P2PConfig) AddrBookFile() string { - return rootify(cfg.AddrBook, cfg.RootDir) -} - // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *P2PConfig) ValidateBasic() error { - if cfg.MaxNumInboundPeers < 0 { - return errors.New("max-num-inbound-peers can't be negative") - } - if cfg.MaxNumOutboundPeers < 0 { - return errors.New("max-num-outbound-peers can't be negative") - } if cfg.FlushThrottleTimeout < 0 { return errors.New("flush-throttle-timeout can't be negative") } - if cfg.PersistentPeersMaxDialPeriod < 0 { - return errors.New("persistent-peers-max-dial-period can't be negative") - } if cfg.MaxPacketMsgPayloadSize < 0 { return errors.New("max-packet-msg-payload-size can't be negative") } @@ -794,12 +675,21 @@ func (cfg *P2PConfig) ValidateBasic() error { return nil } +// TestP2PConfig returns a configuration for testing the peer-to-peer layer +func TestP2PConfig() *P2PConfig { + cfg := DefaultP2PConfig() + cfg.ListenAddress = "tcp://127.0.0.1:36656" + cfg.AllowDuplicateIP = true + cfg.FlushThrottleTimeout = 10 * time.Millisecond + + return cfg +} + //----------------------------------------------------------------------------- // MempoolConfig // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - Version string `mapstructure:"version"` RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` @@ -849,7 +739,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Version: MempoolV1, Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement @@ -1018,42 +907,6 @@ func (cfg *StateSyncConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- - -// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service -// If this node is many blocks behind the tip of the chain, BlockSync -// allows them to catchup quickly by downloading blocks in parallel -// and verifying their commits. -type BlockSyncConfig struct { - Enable bool `mapstructure:"enable"` - Version string `mapstructure:"version"` -} - -// DefaultBlockSyncConfig returns a default configuration for the block sync service -func DefaultBlockSyncConfig() *BlockSyncConfig { - return &BlockSyncConfig{ - Enable: true, - Version: BlockSyncV0, - } -} - -// TestBlockSyncConfig returns a default configuration for the block sync. -func TestBlockSyncConfig() *BlockSyncConfig { - return DefaultBlockSyncConfig() -} - -// ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { - switch cfg.Version { - case BlockSyncV0: - return nil - case BlockSyncV2: - return errors.New("blocksync version v2 is no longer supported. 
Please use v0") - default: - return fmt.Errorf("unknown blocksync version %s", cfg.Version) - } -} - //----------------------------------------------------------------------------- // ConsensusConfig diff --git a/config/config_test.go b/config/config_test.go index 075cedc6a..304eeb0ce 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -66,7 +66,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { assert.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ - "GRPCMaxOpenConnections", "MaxOpenConnections", "MaxSubscriptionClients", "MaxSubscriptionsPerClient", @@ -82,26 +81,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { } } -func TestP2PConfigValidateBasic(t *testing.T) { - cfg := TestP2PConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "MaxNumInboundPeers", - "MaxNumOutboundPeers", - "FlushThrottleTimeout", - "MaxPacketMsgPayloadSize", - "SendRate", - "RecvRate", - } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) - } -} - func TestMempoolConfigValidateBasic(t *testing.T) { cfg := TestMempoolConfig() assert.NoError(t, cfg.ValidateBasic()) @@ -125,18 +104,6 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) } -func TestBlockSyncConfigValidateBasic(t *testing.T) { - cfg := TestBlockSyncConfig() - assert.NoError(t, cfg.ValidateBasic()) - - // tamper with version - cfg.Version = "v2" - assert.Error(t, cfg.ValidateBasic()) - - cfg.Version = "invalid" - assert.Error(t, cfg.ValidateBasic()) -} - func TestConsensusConfig_ValidateBasic(t *testing.T) { // nolint: lll testcases := map[string]struct { @@ -187,3 +154,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} diff --git a/config/db.go b/config/db.go index 3ae274a50..f508354e0 100644 --- a/config/db.go +++ b/config/db.go @@ -1,13 +1,16 @@ package config import ( + "context" + + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - db "github.com/tendermint/tm-db" ) // ServiceProvider takes a config and a logger and returns a ready to go Node. -type ServiceProvider func(*Config, log.Logger) (service.Service, error) +type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error) // DBContext specifies config information for loading a new DB. type DBContext struct { @@ -16,11 +19,11 @@ type DBContext struct { } // DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (db.DB, error) +type DBProvider func(*DBContext) (dbm.DB, error) // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the Config. 
-func DefaultDBProvider(ctx *DBContext) (db.DB, error) { - dbType := db.BackendType(ctx.Config.DBBackend) - return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } diff --git a/config/toml.go b/config/toml.go index 1cb3c0615..0baf4859f 100644 --- a/config/toml.go +++ b/config/toml.go @@ -3,7 +3,6 @@ package config import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -45,23 +44,29 @@ func EnsureRoot(rootDir string) { // WriteConfigFile renders config using the template and writes it to configFilePath. // This function is called by cmd/tendermint/commands/init.go -func WriteConfigFile(rootDir string, config *Config) { - var buffer bytes.Buffer - - if err := configTemplate.Execute(&buffer, config); err != nil { - panic(err) - } - - configFilePath := filepath.Join(rootDir, defaultConfigFilePath) - - mustWriteFile(configFilePath, buffer.Bytes(), 0644) +func WriteConfigFile(rootDir string, config *Config) error { + return config.WriteToTemplate(filepath.Join(rootDir, defaultConfigFilePath)) } -func writeDefaultConfigFileIfNone(rootDir string) { +// WriteToTemplate writes the config to the exact file specified by +// the path, in the default toml template and does not mangle the path +// or filename at all. +func (cfg *Config) WriteToTemplate(path string) error { + var buffer bytes.Buffer + + if err := configTemplate.Execute(&buffer, cfg); err != nil { + return err + } + + return writeFile(path, buffer.Bytes(), 0644) +} + +func writeDefaultConfigFileIfNone(rootDir string) error { configFilePath := filepath.Join(rootDir, defaultConfigFilePath) if !tmos.FileExists(configFilePath) { - WriteConfigFile(rootDir, DefaultConfig()) + return WriteConfigFile(rootDir, DefaultConfig()) } + return nil } // Note: any changes to the comments/variables/mapstructure @@ -159,15 +164,15 @@ state-file = "{{ js .PrivValidator.State }}" # when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client laddr = "{{ .PrivValidator.ListenAddr }}" -# Client certificate generated while creating needed files for secure connection. +# Path to the client certificate generated while creating needed files for secure connection. # If a remote validator address is provided but no certificate, the connection will be insecure client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}" # Client key generated while creating certificates for secure connection -validator-client-key-file = "{{ js .PrivValidator.ClientKey }}" +client-key-file = "{{ js .PrivValidator.ClientKey }}" -# Path Root Certificate Authority used to sign both client and server certificates -certificate-authority = "{{ js .PrivValidator.RootCA }}" +# Path to the Root Certificate Authority used to sign both client and server certificates +root-ca-file = "{{ js .PrivValidator.RootCA }}" ####################################################################### @@ -193,26 +198,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }} # A list of non simple headers the client is allowed to use with cross-domain requests cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. 
-grpc-laddr = "{{ .RPC.GRPCListenAddress }}" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }} - # Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = {{ .RPC.Unsafe }} # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. @@ -226,8 +215,8 @@ max-open-connections = {{ .RPC.MaxOpenConnections }} max-subscription-clients = {{ .RPC.MaxSubscriptionClients }} # Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. +# If you're using a Local RPC client and /broadcast_tx_commit, set this +# to the estimated maximum number of broadcast_tx_commit calls per block. max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }} # How long to wait for a tx to be committed during /broadcast_tx_commit. @@ -265,9 +254,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}" ####################################################### [p2p] -# Enable the legacy p2p layer. -use-legacy = {{ .P2P.UseLegacy }} - # Select the p2p internal queue queue-type = "{{ .P2P.QueueType }}" @@ -299,52 +285,12 @@ persistent-peers = "{{ .P2P.PersistentPeers }}" # UPNP port forwarding upnp = {{ .P2P.UPNP }} -# Path to address book -# TODO: Remove once p2p refactor is complete in favor of peer store. -addr-book-file = "{{ js .P2P.AddrBook }}" - -# Set true for strict address routability rules -# Set false for private or local networks -addr-book-strict = {{ .P2P.AddrBookStrict }} - -# Maximum number of inbound peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. -# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }} - -# Maximum number of outbound peers to connect to, excluding persistent peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. -# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }} - # Maximum number of connections (inbound and outbound). max-connections = {{ .P2P.MaxConnections }} # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -# TODO: Remove once p2p refactor is complete. 
-# ref: https://github.com/tendermint/tendermint/issues/5670 -unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" - -# Time to wait before flushing messages out on the connection -flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" - -# Maximum size of a message packet payload, in bytes -max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} - -# Rate at which packets can be sent, in bytes/second -send-rate = {{ .P2P.SendRate }} - -# Rate at which packets can be received, in bytes/second -recv-rate = {{ .P2P.RecvRate }} - # Set true to enable the peer-exchange reactor pex = {{ .P2P.PexReactor }} @@ -359,16 +305,28 @@ allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" +# Time to wait before flushing messages out on the connection +# TODO: Remove once MConnConnection is removed. +flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once MConnConnection is removed. +max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once MConnConnection is removed. +send-rate = {{ .P2P.SendRate }} + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once MConnConnection is removed. +recv-rate = {{ .P2P.RecvRate }} + + ####################################################### ### Mempool Configuration Option ### ####################################################### [mempool] -# Mempool version to use: -# 1) "v0" - The legacy non-prioritized mempool reactor. -# 2) "v1" (default) - The prioritized mempool reactor. -version = "{{ .Mempool.Version }}" - recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} @@ -437,8 +395,8 @@ rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}" trust-height = {{ .StateSync.TrustHeight }} trust-hash = "{{ .StateSync.TrustHash }}" -# The trust period should be set so that Tendermint can detect and gossip misbehavior before -# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding +# The trust period should be set so that Tendermint can detect and gossip misbehavior before +# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding # period should suffice. trust-period = "{{ .StateSync.TrustPeriod }}" @@ -457,21 +415,6 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}" # The number of concurrent chunk and block fetchers to run (default: 4). 
fetchers = "{{ .StateSync.Fetchers }}" -####################################################### -### Block Sync Configuration Connections ### -####################################################### -[blocksync] - -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -enable = {{ .BlockSync.Enable }} - -# Block Sync version to use: -# 1) "v0" (default) - the standard Block Sync implementation -# 2) "v2" - DEPRECATED, please use v0 -version = "{{ .BlockSync.Version }}" - ####################################################### ### Consensus Configuration Options ### ####################################################### @@ -519,7 +462,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" [tx-index] # The backend database list to back the indexer. -# If list contains null, meaning no indexer service will be used. +# If list contains "null" or "", meaning no indexer service will be used. # # The application will set which txs to index. In some cases a node operator will be able # to decide which txs to index based on configuration set in the application. @@ -527,8 +470,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" # Options: # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. # 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}] # The PostgreSQL connection configuration, the connection format: @@ -560,22 +503,22 @@ namespace = "{{ .Instrumentation.Namespace }}" /****** these are for test settings ***********/ -func ResetTestRoot(testName string) *Config { +func ResetTestRoot(testName string) (*Config, error) { return ResetTestRootWithChainID(testName, "") } -func ResetTestRootWithChainID(testName string, chainID string) *Config { +func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { - panic(err) + return nil, err } // ensure config and data subdirs are created if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { - panic(err) + return nil, err } if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { - panic(err) + return nil, err } conf := DefaultConfig() @@ -584,26 +527,36 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config { privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State) // Write default config file if missing. 
- writeDefaultConfigFileIfNone(rootDir) + if err := writeDefaultConfigFileIfNone(rootDir); err != nil { + return nil, err + } + if !tmos.FileExists(genesisFilePath) { if chainID == "" { chainID = "tendermint_test" } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - mustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + if err := writeFile(genesisFilePath, []byte(testGenesis), 0644); err != nil { + return nil, err + } } // we always overwrite the priv val - mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + if err := writeFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644); err != nil { + return nil, err + } + if err := writeFile(privStateFilePath, []byte(testPrivValidatorState), 0644); err != nil { + return nil, err + } config := TestConfig().SetRoot(rootDir) - return config + return config, nil } -func mustWriteFile(filePath string, contents []byte, mode os.FileMode) { - if err := ioutil.WriteFile(filePath, contents, mode); err != nil { - tmos.Exit(fmt.Sprintf("failed to write file: %v", err)) +func writeFile(filePath string, contents []byte, mode os.FileMode) error { + if err := os.WriteFile(filePath, contents, mode); err != nil { + return fmt.Errorf("failed to write file: %w", err) } + return nil } var testGenesisFmt = `{ diff --git a/config/toml_test.go b/config/toml_test.go index ccf818d65..c062d25e4 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -23,18 +22,18 @@ func TestEnsureRoot(t *testing.T) { require := require.New(t) // setup temp dir for test - tmpDir, err := ioutil.TempDir("", "config-test") - require.Nil(err) + tmpDir, err := os.MkdirTemp("", "config-test") + require.NoError(err) defer os.RemoveAll(tmpDir) // create root dir EnsureRoot(tmpDir) - WriteConfigFile(tmpDir, DefaultConfig()) + require.NoError(WriteConfigFile(tmpDir, DefaultConfig())) // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) - require.Nil(err) + data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + require.NoError(err) checkConfig(t, string(data)) @@ -47,12 +46,13 @@ func TestEnsureTestRoot(t *testing.T) { testName := "ensureTestRoot" // create root dir - cfg := ResetTestRoot(testName) + cfg, err := ResetTestRoot(testName) + require.NoError(err) defer os.RemoveAll(cfg.RootDir) rootDir := cfg.RootDir // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) require.Nil(err) checkConfig(t, string(data)) @@ -70,7 +70,6 @@ func checkConfig(t *testing.T, configFile string) { "moniker", "seeds", "proxy-app", - "blocksync", "create-empty-blocks", "peer", "timeout", diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go deleted file mode 100644 index c15d070e6..000000000 --- a/crypto/armor/armor.go +++ /dev/null @@ -1,39 +0,0 @@ -package armor - -import ( - "bytes" - "fmt" - "io/ioutil" - - "golang.org/x/crypto/openpgp/armor" -) - -func EncodeArmor(blockType string, headers map[string]string, data []byte) string { - buf := new(bytes.Buffer) - w, err := armor.Encode(buf, blockType, headers) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - _, err = w.Write(data) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - err = w.Close() - if 
err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - return buf.String() -} - -func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) { - buf := bytes.NewBufferString(armorStr) - block, err := armor.Decode(buf) - if err != nil { - return "", nil, nil, err - } - data, err = ioutil.ReadAll(block.Body) - if err != nil { - return "", nil, nil, err - } - return block.Type, block.Header, data, nil -} diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go deleted file mode 100644 index 8ecfaa0e1..000000000 --- a/crypto/armor/armor_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package armor - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestArmor(t *testing.T) { - blockType := "MINT TEST" - data := []byte("somedata") - armorStr := EncodeArmor(blockType, nil, data) - - // Decode armorStr and test for equivalence. - blockType2, _, data2, err := DecodeArmor(armorStr) - require.Nil(t, err, "%+v", err) - assert.Equal(t, blockType, blockType2) - assert.Equal(t, data, data2) -} diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 64e4e7c6f..37249bcb3 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -8,34 +8,34 @@ import ( "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" "github.com/tendermint/tendermint/libs/json" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) func init() { - json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") - json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") - json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey") + json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") + json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") } // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey -func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { - var kp pc.PublicKey +func PubKeyToProto(k crypto.PubKey) (cryptoproto.PublicKey, error) { + var kp cryptoproto.PublicKey switch k := k.(type) { case ed25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Ed25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Ed25519{ Ed25519: k, }, } case secp256k1.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Secp256K1{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Secp256K1{ Secp256K1: k, }, } case sr25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Sr25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Sr25519{ Sr25519: k, }, } @@ -46,9 +46,9 @@ func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { } // PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey -func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { +func PubKeyFromProto(k cryptoproto.PublicKey) (crypto.PubKey, error) { switch k := k.Sum.(type) { - case *pc.PublicKey_Ed25519: + case *cryptoproto.PublicKey_Ed25519: if len(k.Ed25519) != ed25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeyEd25519. 
Got %d, expected %d", len(k.Ed25519), ed25519.PubKeySize) @@ -56,7 +56,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(ed25519.PubKey, ed25519.PubKeySize) copy(pk, k.Ed25519) return pk, nil - case *pc.PublicKey_Secp256K1: + case *cryptoproto.PublicKey_Secp256K1: if len(k.Secp256K1) != secp256k1.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d", len(k.Secp256K1), secp256k1.PubKeySize) @@ -64,7 +64,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(secp256k1.PubKey, secp256k1.PubKeySize) copy(pk, k.Secp256K1) return pk, nil - case *pc.PublicKey_Sr25519: + case *cryptoproto.PublicKey_Sr25519: if len(k.Sr25519) != sr25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySr25519. Got %d, expected %d", len(k.Sr25519), sr25519.PubKeySize) diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index cba9bbe4c..6b52dc5d2 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -1,3 +1,4 @@ +//go:build !libsecp256k1 // +build !libsecp256k1 package secp256k1 diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index f8bf29971..7a1109293 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -5,14 +5,13 @@ import ( "math/big" "testing" + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcutil/base58" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/secp256k1" - - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" ) type keyData struct { diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go index b17b1c376..75953d72d 100644 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ b/crypto/xchacha20poly1305/xchachapoly_test.go @@ -2,8 +2,8 @@ package xchacha20poly1305 import ( "bytes" - cr "crypto/rand" - mr "math/rand" + crand "crypto/rand" + mrand "math/rand" "testing" ) @@ -19,23 +19,23 @@ func TestRandom(t *testing.T) { var nonce [24]byte var key [32]byte - al := mr.Intn(128) - pl := mr.Intn(16384) + al := mrand.Intn(128) + pl := mrand.Intn(16384) ad := make([]byte, al) plaintext := make([]byte, pl) - _, err := cr.Read(key[:]) + _, err := crand.Read(key[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(nonce[:]) + _, err = crand.Read(nonce[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(ad) + _, err = crand.Read(ad) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(plaintext) + _, err = crand.Read(plaintext) if err != nil { t.Errorf("error on read: %w", err) } @@ -59,7 +59,7 @@ func TestRandom(t *testing.T) { } if len(ad) > 0 { - alterAdIdx := mr.Intn(len(ad)) + alterAdIdx := mrand.Intn(len(ad)) ad[alterAdIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering additional data", i) @@ -67,14 +67,14 @@ func TestRandom(t *testing.T) { ad[alterAdIdx] ^= 0x80 } - alterNonceIdx := mr.Intn(aead.NonceSize()) + alterNonceIdx := mrand.Intn(aead.NonceSize()) nonce[alterNonceIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering nonce", i) } nonce[alterNonceIdx] ^= 0x80 - alterCtIdx := mr.Intn(len(ct)) + alterCtIdx := mrand.Intn(len(ct)) ct[alterCtIdx] ^= 0x80 if 
_, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering ciphertext", i) diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 59012fba1..b1077de20 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -34,6 +34,10 @@ module.exports = { "label": "v0.34", "key": "v0.34" }, + { + "label": "v0.35", + "key": "v0.35" + }, { "label": "master", "key": "master" @@ -48,10 +52,6 @@ module.exports = { { title: 'Resources', children: [ - { - title: 'Developer Sessions', - path: '/DEV_SESSIONS.html' - }, { title: 'RPC', path: 'https://docs.tendermint.com/master/rpc/', @@ -78,7 +78,7 @@ module.exports = { }, footer: { question: { - text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' + text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' }, logo: '/logo-bw.svg', textLink: { diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 04883e462..da06785d5 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -2,17 +2,27 @@ The documentation for Tendermint Core is hosted at: -- +- built from the files in this (`/docs`) directory for -[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively. +built from the files in this [`docs` directory for `master`](https://github.com/tendermint/tendermint/tree/master/docs) +and other supported release branches. ## How It Works -There is a CircleCI job listening for changes in the `/docs` directory, on both -the `master` branch. Any updates to files in this directory -on those branches will automatically trigger a website deployment. Under the hood, -the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo. +There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml) +in the `tendermint/docs` repository that clones and builds the documentation +site from the contents of this `docs` directory, for `master` and for the +backport branch of each supported release. Under the hood, this workflow runs +`make build-docs` from the [Makefile](../Makefile#L214). + +The list of supported versions is defined in [`config.js`](./.vuepress/config.js), +which defines the UI menu on the documentation site, and also in +[`docs/versions`](./versions), which determines which branches are built. + +The last entry in the `docs/versions` file determines which version is linked +by default from the generated `index.html`. This should generally be the most +recent release, rather than `master`, so that new users are not confused by +documentation for unreleased features.
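For illustration, a `docs/versions` file matching the version menu above might look like the following. This assumes a two-column `branch label` layout per line; check the real file in the repository for the authoritative format:

```
master master
v0.34.x v0.34
v0.35.x v0.35
```

With this ordering, `v0.35` is the last entry and would therefore be the version linked by default from the generated `index.html`.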
## README diff --git a/docs/app-dev/readme.md b/docs/app-dev/readme.md index 51e88fc34..46ce06ca0 100644 --- a/docs/app-dev/readme.md +++ b/docs/app-dev/readme.md @@ -1,7 +1,6 @@ --- order: false parent: + title: "Building Applications" order: 3 ---- - -# Apps +--- \ No newline at end of file diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md index 6fa77a609..608978207 100644 --- a/docs/architecture/adr-006-trust-metric.md +++ b/docs/architecture/adr-006-trust-metric.md @@ -178,7 +178,7 @@ type TrustMetricStore struct { } // OnStart implements Service -func (tms *TrustMetricStore) OnStart() error {} +func (tms *TrustMetricStore) OnStart(context.Context) error { return nil } // OnStop implements Service func (tms *TrustMetricStore) OnStop() {} diff --git a/docs/architecture/adr-065-custom-event-indexing.md b/docs/architecture/adr-065-custom-event-indexing.md index b5c86ecfa..83a96de48 100644 --- a/docs/architecture/adr-065-custom-event-indexing.md +++ b/docs/architecture/adr-065-custom-event-indexing.md @@ -25,6 +25,7 @@ - April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778) - May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106) - Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair) +- Oct 5, 2021: Clarify goals and implementation changes (@creachadair) ## Status @@ -73,19 +74,38 @@ the database used. We will adopt a similar approach to that of the Cosmos SDK's `KVStore` state listening described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-038-state-listening.md). -Namely, we will perform the following: +We will implement the following changes: - Introduce a new interface, `EventSink`, that all data sinks must implement. - Augment the existing `tx_index.indexer` configuration to now accept a series - of one or more indexer types, i.e sinks. + of one or more indexer types, i.e., sinks. - Combine the current `TxIndexer` and `BlockIndexer` into a single `KVEventSink` that implements the `EventSink` interface. -- Introduce an additional `EventSink` that is backed by [PostgreSQL](https://www.postgresql.org/). - - Implement the necessary schemas to support both block and transaction event - indexing. +- Introduce an additional `EventSink` implementation that is backed by + [PostgreSQL](https://www.postgresql.org/). + - Implement the necessary schemas to support both block and transaction event indexing. - Update `IndexerService` to use a series of `EventSinks`. -- Proxy queries to the relevant sink's native query layer. -- Update all relevant RPC methods. + +In addition: + +- The Postgres indexer implementation will _not_ implement the proprietary `kv` + query language. Users wishing to write queries against the Postgres indexer + will connect to the underlying DBMS directly and use SQL queries based on the + indexing schema. + + Future custom indexer implementations will not be required to support the + proprietary query language either. + +- For now, the existing `kv` indexer will be left in place with its current + query support, but will be marked as deprecated in a subsequent release, and + the documentation will be updated to encourage users who need to query the + event index to migrate to the Postgres indexer. + +- In the future we may remove the `kv` indexer entirely, or replace it with a + different implementation; that decision is deferred as future work. 
+ +- In the future, we may remove the index query endpoints from the RPC service + entirely; that decision is deferred as future work, but recommended. ## Detailed Design diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md index c23488005..9bb1c245d 100644 --- a/docs/architecture/adr-071-proposer-based-timestamps.md +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -1,45 +1,13 @@ # ADR 71: Proposer-Based Timestamps -* [Changelog](#changelog) -* [Status](#status) -* [Context](#context) -* [Alternative Approaches](#alternative-approaches) - * [Remove timestamps altogether](#remove-timestamps-altogether) -* [Decision](#decision) -* [Detailed Design](#detailed-design) - * [Overview](#overview) - * [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp) - * [Saving the timestamp across heights](#saving-the-timestamp-across-heights) - * [Changes to `CommitSig`](#changes-to-commitsig) - * [Changes to `Commit`](#changes-to-commit) - * [Changes to `Vote` messages](#changes-to-vote-messages) - * [New consensus parameters](#new-consensus-parameters) - * [Changes to `Header`](#changes-to-header) - * [Changes to the block proposal step](#changes-to-the-block-proposal-step) - * [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp) - * [Proposer selects block timestamp](#proposer-selects-block-timestamp) - * [Proposer waits](#proposer-waits) - * [Changes to the propose step timeout](#changes-to-the-propose-step-timeout) - * [Changes to validation rules](#changes-to-validation-rules) - * [Proposal timestamp validation](#proposal-timestamp-validation) - * [Block timestamp validation](#block-timestamp-validation) - * [Changes to the prevote step](#changes-to-the-prevote-step) - * [Changes to the precommit step](#changes-to-the-precommit-step) - * [Changes to locking a block](#changes-to-locking-a-block) - * [Remove voteTime Completely](#remove-votetime-completely) -* [Future Improvements](#future-improvements) -* [Consequences](#consequences) - * [Positive](#positive) - * [Neutral](#neutral) - * [Negative](#negative) -* [References](#references) - ## Changelog - July 15 2021: Created by @williambanfield - Aug 4 2021: Draft completed by @williambanfield - Aug 5 2021: Draft updated to include data structure changes by @williambanfield - Aug 20 2021: Language edits completed by @williambanfield + - Oct 25 2021: Update the ADR to match updated spec from @cason by @williambanfield + - Nov 10 2021: Additional language updates by @williambanfield per feedback from @cason ## Status @@ -68,7 +36,7 @@ However, their currently known Unix time may be greatly divergent from the block The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues. Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: -1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block. +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block instead of the `BFTTime`. 1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time. The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power. 
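To make the second point concrete, the prevote-time check could look something like the following minimal sketch. Here `precision` and `msgDelay` stand in for the clock-accuracy and message-delay consensus parameters introduced later in this ADR; the function name and placement are illustrative, not the final implementation:

```go
package consensus

import "time"

// isTimely reports whether a proposed timestamp is close enough to this
// validator's own clock: no further in the future than the allowed clock
// inaccuracy, and no further in the past than the allowed inaccuracy plus
// the maximum expected message delay.
func isTimely(proposalTime, localTime time.Time, precision, msgDelay time.Duration) bool {
	return !proposalTime.After(localTime.Add(precision)) &&
		!proposalTime.Before(localTime.Add(-(precision + msgDelay)))
}
```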
@@ -111,45 +79,9 @@ Implementing proposer-based timestamps will require a few changes to Tendermint These changes will be to the following components: * The `internal/consensus/` package. * The `state/` package. -* The `Vote`, `CommitSig`, `Commit` and `Header` types. +* The `Vote`, `CommitSig` and `Header` types. * The consensus parameters. -### Proposal Timestamp and Block Timestamp - -This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message. -The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses. - -The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31). -This timestamp is the current Unix time known to the proposer when sending the `Proposal` message. -This timestamp is not currently used as part of consensus. -The changes in this ADR will begin using the proposal message timestamp as part of consensus. -We will refer to this as the **proposal timestamp** throughout this design. - -The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338). -This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm. -It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block. -This field will continue to be used but the logic for creating and validating this timestamp will change. -We will refer to this as the **block timestamp** throughout this design. - -At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`. -The following image shows this relationship. -The rest of this document describes the code changes that will make this possible. - -![](./img/pbts-message.png) - -### Saving the timestamp across heights - -Currently, `BFTtime` uses `LastCommit` to construct the block timestamp. -The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`. -`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`. - -For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`. -Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`. -To enable this, we will add a `Timestamp` field to the `Commit` struct. -This field will be populated at each height with the proposal timestamp decided on at the previous height. -This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes. -Changes to the `CommitSig` and `Commit` struct are detailed below. - ### Changes to `CommitSig` The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp. @@ -167,32 +99,14 @@ type CommitSig struct { } ``` -### Changes to `Commit` - -The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp. 
-The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp. -With these timestamps removed, the commit time will instead be stored in the `Commit` struct. - -`Commit` will be updated as follows. - -```diff -type Commit struct { - Height int64 `json:"height"` - Round int32 `json:"round"` -++ Timestamp time.Time `json:"timestamp"` - BlockID BlockID `json:"block_id"` - Signatures []CommitSig `json:"signatures"` -} -``` - ### Changes to `Vote` messages `Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50). This struct currently contains a timestamp. This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator. For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field. -For prevotes, this field is unused. -Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`. +For prevotes, this field is currently unused. +Proposer-based timestamps will use the timestamp that the proposer sets into the block and will therefore no longer require that a timestamp be included in the vote messages. This timestamp is therefore no longer useful and will be dropped. `Vote` will be updated as follows: @@ -250,58 +164,28 @@ type TimestampParams struct { } ``` -### Changes to `Header` - -The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp. -This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps. -This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`. -This timestamp will therfore be identical in both the `Header` and the `LastCommit`. -To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`. 
- -`Header` will be updated as follows: - -```diff -type Header struct { - // basic block info - Version version.Consensus `json:"version"` - ChainID string `json:"chain_id"` - Height int64 `json:"height"` --- Time time.Time `json:"time"` -++ LastTimestamp time.Time `json:"last_timestamp"` - - // prev block info - LastBlockID BlockID `json:"last_block_id"` - - // hashes of block data - LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"` - DataHash tmbytes.HexBytes `json:"data_hash"` - - // hashes from the app output from the prev block - ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` - NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` - ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` - AppHash tmbytes.HexBytes `json:"app_hash"` - - // root hash of all results from the txs from the previous block - LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` - - // consensus info - EvidenceHash tmbytes.HexBytes `json:"evidence_hash"` - ProposerAddress Address `json:"proposer_address"` -} -``` - ### Changes to the block proposal step -#### Proposer selects proposal timestamp - -The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message. -This satisfies the proposer-based timestamp specification and does not need to change. - #### Proposer selects block timestamp -The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's in the `RoundState.LastCommit` field. -The proposer will select this timestamp to use as the block timestamp at height `H`. +Tendermint currently uses the `BFTTime` algorithm to produce the block's `Header.Timestamp`. +The [proposal logic](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/internal/state/state.go#L269) sets the weighted median of the times in the `LastCommit.CommitSigs` as the proposed block's `Header.Timestamp`. + +In proposer-based timestamps, the proposer will still set a timestamp into the `Header.Timestamp`. +The timestamp the proposer sets into the `Header` will change depending on if the block has previously received a [polka](https://github.com/tendermint/tendermint/blob/053651160f496bb44b107a434e3e6482530bb287/docs/introduction/what-is-tendermint.md#consensus-overview) or not. + +#### Proposal of a block that has not previously received a polka + +If a proposer is proposing a new block, then it will set the Unix time currently known to the proposer into the `Header.Timestamp` field. +The proposer will also set this same timestamp into the `Timestamp` field of the `Proposal` message that it issues. + +#### Re-proposal of a block that has previously received a polka + +If a proposer is re-proposing a block that has previously received a polka on the network, then the proposer does not update the `Header.Timestamp` of that block. +Instead, the proposer simply re-proposes the exact same block. +This way, the proposed block has the exact same block ID as the previously proposed block and the validators that have already received that block do not need to attempt to receive it again. + +The proposer will set the re-proposed block's `Header.Timestamp` as the `Proposal` message's `Timestamp`. 
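A minimal sketch of this selection rule, with hypothetical names (the real code paths live in `internal/consensus`):

```go
package consensus

import "time"

// header is a stand-in for the real block header; only the time matters here.
type header struct{ Time time.Time }

// proposalTime sketches the rule above: when re-proposing a block that has
// already received a polka, keep its original timestamp so the block ID is
// unchanged; otherwise stamp the proposer's current known Unix time.
func proposalTime(polkaBlock *header, now time.Time) time.Time {
	if polkaBlock != nil {
		return polkaBlock.Time
	}
	return now
}
```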
#### Proposer waits

@@ -310,72 +194,94 @@ In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millis
A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator no longer works.
Validator clocks will not be perfectly in sync.
-Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`.
-If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`.
+Therefore, the proposer’s current known Unix time may be less than the previous block's `Header.Time`.
+If the proposer’s current known Unix time is less than the previous block's `Header.Time`, the proposer will sleep until its known Unix time exceeds it.

This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method.
-This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`.
+This method should now schedule a timeout that fires when the proposer’s time is greater than the previous block's `Header.Time`.
+When the timeout fires, the proposer will finally issue the `Proposal` message.

#### Changes to the propose step timeout

Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen.
-Proposer-based timestamps requires changing this timeout logic.
+Proposer-based timestamps require changing this timeout logic.

-The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block.
+The proposer will now wait until its current known Unix time exceeds the previous block's `Header.Time` to propose a block.
The validators must now take this and some other factors into account when deciding when to time out the propose step.
Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer.
Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
-Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out.
-To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timesatmp + 2*ACCURACY + MSGDELAY`.
+Therefore, validators waiting for a proposal must wait until after the previous block's `Header.Time` before timing out.
+To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until the previous block's `Header.Time + 2*ACCURACY + MSGDELAY`.
The spec defines this as `waitingTime`.

The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
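+
+As a worked example of this arithmetic, the following is a minimal Go sketch of the two waits described above; the helper names `proposerWait` and `proposeTimeout` and the duration values in `main` are assumptions for illustration only.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// proposerWait returns how long a proposer must sleep before proposing:
+// zero if its clock has already passed the previous block's Header.Time.
+func proposerWait(prevBlockTime, now time.Time) time.Duration {
+	if now.After(prevBlockTime) {
+		return 0
+	}
+	return prevBlockTime.Sub(now)
+}
+
+// proposeTimeout returns the propose-step timeout for a validator waiting
+// on a proposal: the maximum of the spec's waitingTime and the configured
+// propose timeout.
+func proposeTimeout(prevBlockTime, now time.Time, accuracy, msgDelay, configured time.Duration) time.Duration {
+	// waitingTime = (previous block's Header.Time + 2*ACCURACY + MSGDELAY) - now
+	waitingTime := prevBlockTime.Add(2*accuracy + msgDelay).Sub(now)
+	if waitingTime > configured {
+		return waitingTime
+	}
+	return configured
+}
+
+func main() {
+	now := time.Now()
+	prev := now.Add(500 * time.Millisecond) // previous block time slightly ahead of our clock
+	fmt.Println(proposerWait(prev, now))    // 500ms sleep before proposing
+	// waitingTime is 700ms here, so the configured 3s timeout dominates.
+	fmt.Println(proposeTimeout(prev, now, 50*time.Millisecond, 100*time.Millisecond, 3*time.Second))
+}
+```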
-### Changes to validation rules
+### Changes to proposal validation rules

-The rules for validating that a proposal is valid will need slight modification to implement proposer-based timestamps.
-Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well.
+The rules for validating a proposed block will need modification to implement proposer-based timestamps.
+We will change the validation logic to ensure that a proposal is `timely`.

-#### Proposal timestamp validation
+Per the proposer-based timestamps spec, `timely` only needs to be checked if a block has not received a +2/3 majority of `Prevotes` in a round.
+If a block received a +2/3 majority of prevotes in a previous round, then +2/3 of the voting power considered the block's timestamp near enough to their own currently known Unix time in that round.

-Adding proposal timestamp validation is a reasonably straightforward change.
-The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
-Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
-The precommit and prevote validation logic does not currently use this timestamp.
-This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators.
-If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered it valid.
-The validator will also check that the proposal time is greater than the block timestamp from the previous height.
+The validation logic will be updated to check `timely` for blocks that did not previously receive +2/3 prevotes in a round.
+Receiving +2/3 prevotes in a round is frequently referred to as a 'polka' and we will use this term for simplicity.

-If no valid proposal is received by the proposal timeout, the validator will prevote nil.
-This is identical to the current logic.
+#### Current timestamp validation logic

-#### Block timestamp validation
+To provide a better understanding of the changes needed to timestamp validation, we will first detail how timestamp validation currently works in Tendermint.

The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
-Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
-Finally, the logic also authenticates the timestamps in the `LastCommit`.
-The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key.
-One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`.
-To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
+
+Second, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
+
+Finally, the validation logic authenticates the timestamps in the `LastCommit.CommitSig`.
+The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the voting validator’s private key.
+One of the items in this `signedBytes` hash is the timestamp in the `CommitSig`.
+To authenticate the `CommitSig` timestamp, the validator authenticating votes builds a hash of fields that includes the `CommitSig` timestamp and checks this hash against the signature.
This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25).

-The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change.
+#### Remove unused timestamp validation logic

`BFTTime` validation is no longer applicable and will be removed.
-Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
-This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
+This means that validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
+Specifically, we will remove the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
The `MedianTime` function can be completely removed.
-The `LastCommit` timestamps may also be removed.
-The `signedBytes` validation logic in `VerifyCommit` will be slightly altered.
-The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp.
-The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`.
-The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp.
-Specifically, the `VerifyCommit` function will be updated to use this new timestamp.
+Since `CommitSig`s will no longer contain a timestamp, the validator authenticating a commit will no longer include the `CommitSig` timestamp in the hash of fields it builds to check against the cryptographic signature.
+
+#### Timestamp validation when a block has not received a polka
+
+The [POLRound](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/types/proposal.go#L29) in the `Proposal` message indicates which round the block received a polka.
+A negative value in the `POLRound` field indicates that the block has not previously been proposed on the network.
+Therefore, the validation logic will check that a proposal is `timely` when `POLRound < 0`.
+
+When a validator receives a `Proposal` message, the validator will check that the `Proposal.Timestamp` is at most `PRECISION` greater than the current Unix time known to the validator, and at most `PRECISION + MSGDELAY` less than the current Unix time known to the validator.
+If the timestamp is not within these bounds, the proposed block will not be considered `timely`.
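+
+In other words, a proposal is `timely` only if `now - (PRECISION + MSGDELAY) <= Proposal.Timestamp <= now + PRECISION`, where `now` is the validator's current known Unix time. The Go sketch below illustrates this check; the name `isTimely` and the parameter values in `main` are illustrative assumptions, not the implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// isTimely sketches the proposal timestamp check: the timestamp may be at
+// most precision ahead of the validator's clock (allowing for clock skew)
+// and at most precision+msgDelay behind it (skew plus gossip delay).
+func isTimely(ts, now time.Time, precision, msgDelay time.Duration) bool {
+	lowerBound := now.Add(-(precision + msgDelay))
+	upperBound := now.Add(precision)
+	return !ts.Before(lowerBound) && !ts.After(upperBound)
+}
+
+func main() {
+	now := time.Now()
+	precision, msgDelay := 500*time.Millisecond, 2*time.Second
+	fmt.Println(isTimely(now.Add(-time.Second), now, precision, msgDelay)) // true: within bounds
+	fmt.Println(isTimely(now.Add(time.Minute), now, precision, msgDelay))  // false: too far in the future
+}
+```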
+
+Once a full block matching the `Proposal` message is received, the validator will also check that the timestamp in the `Header.Timestamp` of the block matches this `Proposal.Timestamp`.
+Using the `Proposal.Timestamp` to check `timely` allows for the `MSGDELAY` parameter to be more finely tuned since `Proposal` messages do not change sizes and are therefore faster to gossip than full blocks across the network.
+
+A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
+If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
+
+#### Timestamp validation when a block has received a polka
+
+When a block is re-proposed that has already received a +2/3 majority of `Prevote`s on the network, the `Proposal` message for the re-proposed block is created with a `POLRound` that is `>= 0`.
+A validator will not check that the `Proposal` is `timely` if the proposal message has a non-negative `POLRound`.
+If the `POLRound` is non-negative, each validator will simply ensure that it received the `Prevote` messages for the proposed block in the round indicated by `POLRound`.
+
+If the validator did not receive `Prevote` messages for the proposed block in `POLRound`, then it will prevote nil.
+Validators already check that +2/3 prevotes were seen in `POLRound`, so this does not represent a change to the prevote logic.
+
+A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
+If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
+
+Additionally, this validation logic can be updated to check that the `Proposal.Timestamp` matches the `Header.Timestamp` of the proposed block, but it is less relevant since checking that votes were received is sufficient to ensure the block timestamp is correct.

### Changes to the prevote step

@@ -383,26 +289,14 @@ Currently, a validator will prevote a proposal in one of three cases:

* Case 1: Validator has no locked block and receives a valid proposal.
* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
-* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the new proposal’s block.
+* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the proposal’s block, either in the current round or in a round greater than or equal to the round in which it locked its currently locked block.

The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above.

### Changes to the precommit step

The precommit step will not require much modification.
-Its proposal validation rules will change in the same ways that validation will change in the prevote step.
-
-### Changes to locking a block
-When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field.
-In each subsequent round it will prevote that block.
-A validator will only change which block it has locked if it sees +2/3 prevotes for a different block.
-
-This mechanism will remain largely unchanged.
-The only difference is the addition of proposal timestamp validation.
-A validator will prevote nil in a round if the proposal message it received is not `timely`.
-Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block.
-This difference is an incidental result of the changes to prevote validation.
-It is included in this design for completeness and to clarify that no additional changes will be made to block locking.
+Its proposal validation rules will change in the same ways that validation will change in the prevote step, with the exception of the `timely` check: precommit validation will never check that the timestamp is `timely`.

### Remove voteTime Completely

diff --git a/docs/networks/README.md b/docs/networks/README.md
deleted file mode 100644
index 8528f44ed..000000000
--- a/docs/networks/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-order: 1
-parent:
-  title: Networks
-  order: 6
----
-
-# Overview
-
-Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your
-local machine.
-
-Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
-testnets to the cloud.
-
-See the `tendermint testnet --help` command for more help initializing testnets.
diff --git a/docs/nodes/README.md b/docs/nodes/README.md
index 9be6febf0..fd9056e0d 100644
--- a/docs/nodes/README.md
+++ b/docs/nodes/README.md
@@ -1,7 +1,7 @@
---
order: 1
parent:
-  title: Nodes
+  title: Node Operators
  order: 4
---

diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md
index ffdbaffa2..0c11df6f7 100644
--- a/docs/nodes/configuration.md
+++ b/docs/nodes/configuration.md
@@ -16,8 +16,8 @@ the parameters set with their default values. It will look something
like the file below, however, double check by inspecting the
`config.toml` created with your version of `tendermint` installed:

-```toml
-# This is a TOML config file.
+```toml
+# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml

# NOTE: Any path below can be absolute (e.g.
"/var/myawesomeapp/data") or @@ -34,14 +33,14 @@ like the file below, however, double check by inspecting the proxy-app = "tcp://127.0.0.1:26658" # A custom human readable name for this node -moniker = "anonymous" +moniker = "ape" # Mode of Node: full | validator | seed (default: "validator") # * validator node (default) # - all reactors # - with priv_validator_key.json, priv_validator_state.json -# * full node +# * full node # - all reactors # - No priv_validator_key.json, priv_validator_state.json # * seed node @@ -49,6 +48,11 @@ moniker = "anonymous" # - No priv_validator_key.json, priv_validator_state.json mode = "validator" +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast-sync = true + # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -84,16 +88,6 @@ log-format = "plain" # Path to the JSON file containing the initial validator set and other meta data genesis-file = "config/genesis.json" -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv-validator-key-file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv-validator-state-file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv-validator-laddr = "" - # Path to the JSON file containing the private key to use for node authentication in the p2p protocol node-key-file = "config/node_key.json" @@ -105,6 +99,33 @@ abci = "socket" filter-peers = false +####################################################### +### Priv Validator Configuration ### +####################################################### +[priv-validator] + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +key-file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +state-file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client +laddr = "" + +# Path to the client certificate generated while creating needed files for secure connection. +# If a remote validator address is provided but no certificate, the connection will be insecure +client-certificate-file = "" + +# Client key generated while creating certificates for secure connection +validator-client-key-file = "" + +# Path to the Root Certificate Authority used to sign both client and server certificates +certificate-authority = "" + + ####################################################################### ### Advanced Configuration Options ### ####################################################################### @@ -130,6 +151,7 @@ cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", # TCP or UNIX socket address for the gRPC server to listen on # NOTE: This server only supports /broadcast_tx_commit +# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. grpc-laddr = "" # Maximum number of simultaneous connections. @@ -139,9 +161,10 @@ grpc-laddr = "" # 0 - unlimited. 
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
+# Deprecated: gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = 900

-# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
unsafe = false

# Maximum number of simultaneous connections (including WebSocket).
@@ -198,18 +221,31 @@ pprof-laddr = ""
#######################################################
[p2p]

+# Select the p2p internal queue
+queue-type = "priority"
+
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"

# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
-# to figure out the address.
+# to figure out the address. ip and port are required
+# example: 159.89.10.97:26656
external-address = ""

# Comma separated list of seed nodes to connect to
+# We only use these if we can’t connect to peers in the addrbook
+# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
seeds = ""

+# Comma separated list of peers to be added to the peer store
+# on startup. Either BootstrapPeers or PersistentPeers is
+# needed for peer discovery
+bootstrap-peers = ""
+
# Comma separated list of nodes to keep persistent connections to
persistent-peers = ""

@@ -217,6 +253,8 @@ persistent-peers = ""
upnp = false

# Path to address book
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
addr-book-file = "config/addrbook.json"

# Set true for strict address routability rules
@@ -224,9 +262,15 @@ addr-book-file = "config/addrbook.json"
addr-book-strict = true

# Maximum number of inbound peers
+#
+# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
+# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-inbound-peers = 40

# Maximum number of outbound peers to connect to, excluding persistent peers
+#
+# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
+# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-outbound-peers = 10

# Maximum number of connections (inbound and outbound).
@@ -236,27 +280,40 @@ max-connections = 64
max-incoming-connection-attempts = 100

# List of node IDs, to which a connection will be (re)established ignoring any existing limits
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = ""

# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
persistent-peers-max-dial-period = "0s"

# Time to wait before flushing messages out on the connection
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
flush-throttle-timeout = "100ms"

# Maximum size of a message packet payload, in bytes
-max-packet-msg-payload-size = 1024
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
+max-packet-msg-payload-size = 1400

# Rate at which packets can be sent, in bytes/second
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
send-rate = 5120000

# Rate at which packets can be received, in bytes/second
+# TODO: Remove once p2p refactor is complete
+# ref: https://github.com/tendermint/tendermint/issues/5670
recv-rate = 5120000

# Set true to enable the peer-exchange reactor
pex = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = ""

# Toggle to disable guard against peers connecting from the same ip.
@@ -349,8 +406,15 @@ discovery-time = "15s"
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = ""

+# The timeout duration before re-requesting a chunk, possibly from a different
+# peer (default: 15 seconds).
+chunk-request-timeout = "15s"
+
+# The number of concurrent chunk and block fetchers to run (default: 4).
+fetchers = "4"
+
#######################################################
-###       BlockSync Configuration Connections       ###
+###       Block Sync Configuration Connections      ###
#######################################################
[blocksync]

@@ -410,7 +474,8 @@ peer-query-maj23-sleep-duration = "2s"
#######################################################
[tx-index]

-# What indexer to use for transactions
+# The backend database list to back the indexer.
+# If the list contains "null" or "", no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
@@ -418,8 +483,13 @@ peer-query-maj23-sleep-duration = "2s"
# Options:
#   1) "null"
#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
-#   - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
-indexer = "kv"
+#   3) "psql" - the indexer service backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
+indexer = ["kv"]
+
+# The PostgreSQL connection configuration, the connection format:
+#   postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
+psql-conn = ""

#######################################################
###       Instrumentation Configuration Options     ###
@@ -520,10 +590,61 @@ This section will cover settings within the p2p section of the `config.toml`.
- `external-address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port.
- > We recommend setting an external address. When used in a private network, Tendermint Core currently doesn't advertise the node's public address. There is active and ongoing work to improve the P2P system, but this is a helpful workaround for now.
-- `seeds` = is a list of comma separated seed nodes that you will connect upon a start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the networks
- `persistent-peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added.
-- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection).
-- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection).
-- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
+
+Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
+
+We will cover the new and deprecated parameters below.
+
+### New Parameters
+
+There are three new parameters, which are enabled if `use-legacy` is set to `false`.
+
+- `queue-type` = sets the type of queue to use in the p2p layer. There are three options available: `fifo`, `priority`, and `wdrr`. The default is `priority`.
+- `bootstrap-peers` = is a list of comma separated peers which will be used to bootstrap the address book.
+- `max-connections` = is the maximum number of allowed inbound and outbound connections.
+
+### Deprecated Parameters
+
+> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by default with the deprecated fields. The new implementation uses different config parameters, explained above.
+
+- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`*
+- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). *This was replaced by `max-connections`*
+- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
*Deprecated*
+- `seeds` = is a list of comma separated seed nodes that you will connect to upon start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network. *Deprecated, replaced by `bootstrap-peers`*
+
+## Indexing Settings
+
+Operators can configure indexing via the `[tx-index]` section. The `indexer`
+field takes a series of supported indexers. If `null` is included, indexing will
+be turned off regardless of other values provided.
+
+### Supported Indexers
+
+#### KV
+
+The `kv` indexer type is an embedded key-value store supported by the main
+underlying Tendermint database. Using the `kv` indexer type allows you to query
+for block and transaction events directly against Tendermint's RPC. However, the
+query syntax is limited and so this indexer type might be deprecated or removed
+entirely in the future.
+
+#### PostgreSQL
+
+The `psql` indexer type allows an operator to enable block and transaction event
+indexing by proxying it to an external PostgreSQL instance allowing for the events
+to be stored in relational models. Since the events are stored in an RDBMS, operators
+can leverage SQL to perform a series of rich and complex queries that are not
+supported by the `kv` indexer type. Since operators can leverage SQL directly,
+searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
+such query will fail.
+
+Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
+must explicitly create the relations prior to starting Tendermint and enabling
+the `psql` indexer type.
+
+Example:
+
+```shell
+$ psql ... -f state/indexer/sink/psql/schema.sql
+```
diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md
index baaa9a812..6589e044a 100644
--- a/docs/nodes/metrics.md
+++ b/docs/nodes/metrics.md
@@ -20,6 +20,7 @@ The following metrics are available:

| **Name**                               | **Type**  | **Tags**      | **Description**                                                        |
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
+| abci_connection_method_timing          | Histogram | method, type  | Timings for each of the ABCI methods                                   |
| consensus_height                       | Gauge     |               | Height of the chain                                                    |
| consensus_validators                   | Gauge     |               | Number of validators                                                   |
| consensus_validators_power             | Gauge     |               | Total voting power of all validators                                   |
@@ -55,6 +56,16 @@ The following metrics are available:

Percentage of missing + byzantine validators:

-```md
-((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100
+```prometheus
+((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
+```
+
+Rate at which the application is responding to each ABCI method call:
+```
+sum(rate(tendermint_abci_connection_method_timing_count[5m])) by (method)
+```
+
+The 95th percentile response time for the application to the `deliver_tx` ABCI method call:
+``` +histogram_quantile(0.95, sum by(le) (rate(tendermint_abci_connection_method_timing_bucket{method="deliver_tx"}[5m]))) ``` diff --git a/docs/package-lock.json b/docs/package-lock.json index 1c362b60a..c6f21bd6b 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -1,12541 +1,8 @@ { "name": "docs", "version": "1.0.0", - "lockfileVersion": 2, + "lockfileVersion": 1, "requires": true, - "packages": { - "": { - "version": "1.0.0", - "license": "ISC", - "dependencies": { - "vuepress-theme-cosmos": "^1.0.182" - }, - "devDependencies": { - "watchpack": "^2.2.0" - } - }, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.8.6.tgz", - "integrity": "sha512-Bam7otzjIEgrRXWmk0Amm1+B3ROI5dQnUfJEBjIy0YPM0kMahEoJXCw6160tGKxJLl1g6icoC953nGshQKO7cA==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.8.6.tgz", - "integrity": "sha512-eGQlsXU5G7n4RvV/K6qe6lRAeL6EKAYPT3yZDBjCW4pAh7JWta+77a7BwUQkTqXN1MEQWZXjex3E4z/vFpzNrg==" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.8.6.tgz", - "integrity": "sha512-kbJrvCFANxL/l5Pq1NFyHLRphKDwmqcD/OJga0IbNKEulRGDPkt1+pC7/q8d2ikP12adBjLLg2CVias9RJpIaw==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.8.6.tgz", - "integrity": "sha512-FQVJE/BgCb78jtG7V0r30sMl9P5JKsrsOacGtGF2YebqI0YF25y8Z1nO39lbdjahxUS3QkDw2d0P2EVMj65g2Q==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-analytics": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.8.6.tgz", - "integrity": "sha512-ZBYFUlzNaWDFtt0rYHI7xbfVX0lPWU9lcEEXI/BlnkRgEkm247H503tNatPQFA1YGkob52EU18sV1eJ+OFRBLA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.8.6.tgz", - "integrity": "sha512-8dI+K3Nvbes2YRZm2LY7bdCUD05e60BhacrMLxFuKxnBGuNehME1wbxq/QxcG1iNFJlxLIze5TxIcNN3+pn76g==", - "dependencies": { - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-recommendation": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.8.6.tgz", - "integrity": "sha512-Kg8DpjwvaWWujNx6sAUrSL+NTHxFe/UNaliCcSKaMhd3+FiPXN+CrSkO0KWR7I+oK2qGBTG/2Y0BhFOJ5/B/RA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-search": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.8.6.tgz", - "integrity": "sha512-vXLS6umL/9G3bwqc6pkrS9K5/s8coq55mpfRARL+bs0NsToOf77WSTdwzlxv/KdbVF7dHjXgUpBvJ6RyR4ZdAw==", - "dependencies": 
{ - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/logger-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.8.6.tgz", - "integrity": "sha512-FMRxZGdDxSzd0/Mv0R1021FvUt0CcbsQLYeyckvSWX8w+Uk4o0lcV6UtZdERVR5XZsGOqoXLMIYDbR2vkbGbVw==" - }, - "node_modules/@algolia/logger-console": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.8.6.tgz", - "integrity": "sha512-TYw9lwUCjvApC6Z0zn36T6gkCl7hbfJmnU+Z/D8pFJ3Yp7lz06S3oWGjbdrULrYP1w1VOhjd0X7/yGNsMhzutQ==", - "dependencies": { - "@algolia/logger-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.8.6.tgz", - "integrity": "sha512-omh6uJ3CJXOmcrU9M3/KfGg8XkUuGJGIMkqEbkFvIebpBJxfs6TVs0ziNeMFAcAfhi8/CGgpLbDSgJtWdGQa6w==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.8.6.tgz", - "integrity": "sha512-r5xJqq/D9KACkI5DgRbrysVL5DUUagikpciH0k0zjBbm+cXiYfpmdflo/h6JnY6kmvWgjr/4DoeTjKYb/0deAQ==" - }, - "node_modules/@algolia/requester-node-http": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.8.6.tgz", - "integrity": "sha512-TB36OqTVOKyHCOtdxhn/IJyI/NXi/BWy8IEbsiWwwZWlL79NWHbetj49jXWFolEYEuu8PgDjjZGpRhypSuO9XQ==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.8.6.tgz", - "integrity": "sha512-NRb31J0TP7EPoVMpXZ4yAtr61d26R8KGaf6qdULknvq5sOVHuuH4PwmF08386ERfIsgnM/OBhl+uzwACdCIjSg==", - "dependencies": { - "@algolia/cache-common": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", - "dependencies": { - "@babel/highlight": "^7.12.13" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz", - "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA==" - }, - "node_modules/@babel/core": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.15.tgz", - "integrity": "sha512-6GXmNYeNjS2Uz+uls5jalOemgIhnTMeaXo+yBUA72kC2uX/8VW6XyhVIo2L8/q0goKQA3EVKx0KOQpVKSeWadQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-module-transforms": "^7.13.14", - "@babel/helpers": "^7.13.10", - "@babel/parser": "^7.13.15", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.15", - "@babel/types": "^7.13.14", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.1.2", - "semver": "^6.3.0", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/@babel/core/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/core/node_modules/json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/core/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.13.9", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.9.tgz", - "integrity": "sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw==", - "dependencies": { - "@babel/types": "^7.13.0", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - } - }, - "node_modules/@babel/generator/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz", - "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz", - "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==", - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.13.13", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.13.tgz", - "integrity": "sha512-q1kcdHNZehBwD9jYPh3WyXcsFERi39X4I59I3NadciWtNDyZ6x+GboOxncFK0kXlKIv6BJm5acncehXWUjWQMQ==", - "dependencies": { - "@babel/compat-data": "^7.13.12", - "@babel/helper-validator-option": "^7.12.17", - "browserslist": "^4.14.5", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.13.11", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz", - "integrity": "sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==", - "dependencies": { - 
"@babel/helper-function-name": "^7.12.13", - "@babel/helper-member-expression-to-functions": "^7.13.0", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz", - "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "regexpu-core": "^4.7.1" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz", - "integrity": "sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz", - "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==", - "dependencies": { - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", - "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", - "dependencies": { - "@babel/helper-get-function-arity": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-get-function-arity": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", - "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.0.tgz", - "integrity": "sha512-0kBzvXiIKfsCA0y6cFEIJf4OdzfpRuNk4+YTeHZpGGc666SATFKTz6sRncwFnQk7/ugJ4dSrCj6iJuvW4Qwr2g==", - "dependencies": { - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - 
}, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz", - "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz", - "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz", - "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-replace-supers": "^7.13.12", - "@babel/helper-simple-access": "^7.13.12", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/helper-validator-identifier": "^7.12.11", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.13", - "@babel/types": "^7.13.14" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", - "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz", - "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ==" - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz", - "integrity": "sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-wrap-function": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz", - "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==", - "dependencies": { - "@babel/helper-member-expression-to-functions": "^7.13.12", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz", - "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.12.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", - "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", - "dependencies": { - "@babel/types": "^7.12.1" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", - "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==" - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz", - "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw==" - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz", - "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.10.tgz", - "integrity": "sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ==", - "dependencies": { - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.15.tgz", - "integrity": "sha512-b9COtcAlVEQljy/9fbcMHpG+UIW9ReF+gpaxDHTlZd0c6/UU9ng8zdySAW9sRTzpvcdCHn6bUcbuYUgGzLAWVQ==", - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz", - "integrity": "sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-proposal-optional-chaining": "^7.13.12" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - 
"version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz", - "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz", - "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-decorators": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz", - "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.11", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-decorators": "^7.12.13" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz", - "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz", - "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz", - "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz", - "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz", - "integrity": 
"sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz", - "integrity": "sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz", - "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==", - "dependencies": { - "@babel/compat-data": "^7.13.8", - "@babel/helper-compilation-targets": "^7.13.8", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz", - "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz", - "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz", - "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz", - "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": 
"sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-decorators": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz", - "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz", - "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": 
"sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz", - "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz", - "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz", - "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==", - "dependencies": { - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz", - "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz", - "integrity": "sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.13.0", 
- "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz", - "integrity": "sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz", - "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz", - "integrity": "sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz", - "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz", - "integrity": "sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz", - "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz", - "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz", - "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - 
"node_modules/@babel/plugin-transform-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz", - "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz", - "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz", - "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz", - "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-simple-access": "^7.12.13", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz", - "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==", - "dependencies": { - "@babel/helper-hoist-variables": "^7.13.0", - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-identifier": "^7.12.11", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz", - "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz", - "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz", - "integrity": 
"sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz", - "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/helper-replace-supers": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz", - "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz", - "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz", - "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==", - "dependencies": { - "regenerator-transform": "^0.14.2" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz", - "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz", - "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz", - "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz", - "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - 
"@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz", - "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz", - "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz", - "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz", - "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz", - "integrity": "sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz", - "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==", - "dependencies": { - "@babel/compat-data": "^7.13.15", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-option": "^7.12.17", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-async-generator-functions": "^7.13.15", - "@babel/plugin-proposal-class-properties": "^7.13.0", - "@babel/plugin-proposal-dynamic-import": "^7.13.8", - "@babel/plugin-proposal-export-namespace-from": "^7.12.13", - "@babel/plugin-proposal-json-strings": "^7.13.8", - "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8", - "@babel/plugin-proposal-numeric-separator": "^7.12.13", - "@babel/plugin-proposal-object-rest-spread": "^7.13.8", - "@babel/plugin-proposal-optional-catch-binding": "^7.13.8", - "@babel/plugin-proposal-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-private-methods": "^7.13.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.12.13", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - 
"@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.12.13", - "@babel/plugin-transform-arrow-functions": "^7.13.0", - "@babel/plugin-transform-async-to-generator": "^7.13.0", - "@babel/plugin-transform-block-scoped-functions": "^7.12.13", - "@babel/plugin-transform-block-scoping": "^7.12.13", - "@babel/plugin-transform-classes": "^7.13.0", - "@babel/plugin-transform-computed-properties": "^7.13.0", - "@babel/plugin-transform-destructuring": "^7.13.0", - "@babel/plugin-transform-dotall-regex": "^7.12.13", - "@babel/plugin-transform-duplicate-keys": "^7.12.13", - "@babel/plugin-transform-exponentiation-operator": "^7.12.13", - "@babel/plugin-transform-for-of": "^7.13.0", - "@babel/plugin-transform-function-name": "^7.12.13", - "@babel/plugin-transform-literals": "^7.12.13", - "@babel/plugin-transform-member-expression-literals": "^7.12.13", - "@babel/plugin-transform-modules-amd": "^7.13.0", - "@babel/plugin-transform-modules-commonjs": "^7.13.8", - "@babel/plugin-transform-modules-systemjs": "^7.13.8", - "@babel/plugin-transform-modules-umd": "^7.13.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13", - "@babel/plugin-transform-new-target": "^7.12.13", - "@babel/plugin-transform-object-super": "^7.12.13", - "@babel/plugin-transform-parameters": "^7.13.0", - "@babel/plugin-transform-property-literals": "^7.12.13", - "@babel/plugin-transform-regenerator": "^7.13.15", - "@babel/plugin-transform-reserved-words": "^7.12.13", - "@babel/plugin-transform-shorthand-properties": "^7.12.13", - "@babel/plugin-transform-spread": "^7.13.0", - "@babel/plugin-transform-sticky-regex": "^7.12.13", - "@babel/plugin-transform-template-literals": "^7.13.0", - "@babel/plugin-transform-typeof-symbol": "^7.12.13", - "@babel/plugin-transform-unicode-escapes": "^7.12.13", - "@babel/plugin-transform-unicode-regex": "^7.12.13", - "@babel/preset-modules": "^0.1.4", - "@babel/types": "^7.13.14", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "core-js-compat": "^3.9.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz", - "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "node_modules/@babel/runtime": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.10.tgz", - "integrity": "sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw==", - "dependencies": { - "regenerator-runtime": "^0.13.4" - } - }, - "node_modules/@babel/template": { - "version": "7.12.13", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", - "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/parser": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/traverse": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.15.tgz", - "integrity": "sha512-/mpZMNvj6bce59Qzl09fHEs8Bt8NnpEDQYleHUPZQ3wXUMvXi+HJPLars68oAbmp839fGoOkv2pSL2z9ajCIaQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/parser": "^7.13.15", - "@babel/types": "^7.13.14", - "debug": "^4.1.0", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/traverse/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/traverse/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/types": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.14.tgz", - "integrity": "sha512-A2aa3QTkWoyqsZZFl56MLUsfmh7O0gN41IPvXAE/++8ojpbz12SszD7JEGYVdn4f9Kt4amIei07swF1h4AqmmQ==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "lodash": "^4.17.19", - "to-fast-properties": "^2.0.0" - } - }, - "node_modules/@cosmos-ui/vue": { - "version": "0.35.0", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz", - "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==", - "dependencies": { - "algoliasearch": "^4.1.0", - "axios": "^0.19.2", - "clipboard-copy": "^3.1.0", - "fuse.js": "^3.4.6", - "hotkeys-js": "^3.7.3", - "js-base64": "^2.5.2", - "lodash": "^4.17.15", - "markdown-it": "^10.0.0", - "prismjs": "^1.19.0", - "querystring": "^0.2.0", - "tiny-cookie": "^2.3.1", - "vue": "^2.6.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/axios": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", - "dependencies": { - "follow-redirects": "1.5.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/entities": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", - "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" - }, - "node_modules/@cosmos-ui/vue/node_modules/markdown-it": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz", - "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~2.0.0", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/@mrmlnc/readdir-enhanced": { - 
"version": "2.2.1", - "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", - "dependencies": { - "call-me-maybe": "^1.0.1", - "glob-to-regexp": "^0.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz", - "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@types/glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", - "dependencies": { - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.7", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz", - "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA==" - }, - "node_modules/@types/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==" - }, - "node_modules/@types/node": { - "version": "14.14.37", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.37.tgz", - "integrity": "sha512-XYmBiy+ohOR4Lh5jE379fV2IU+6Jn4g5qASinhitfyO71b/sCo6MKsMLF5tc7Zf2CE8hViVQyYSobJNke8OvUw==" - }, - "node_modules/@types/q": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", - "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" - }, - "node_modules/@vue/babel-helper-vue-jsx-merge-props": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz", - "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA==" - }, - "node_modules/@vue/babel-helper-vue-transform-on": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.2.tgz", - "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA==" - }, - "node_modules/@vue/babel-plugin-jsx": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.4.tgz", - "integrity": "sha512-Vu5gsabUdsiWc4vQarg46xWJGs8pMEJyyMQAKA1vO+F4+aR4/jaxWxPCOvZ7XvVyy+ecSbwQp/qIyDVje360UQ==", - 
"dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.0.0", - "@babel/template": "^7.0.0", - "@babel/traverse": "^7.0.0", - "@babel/types": "^7.0.0", - "@vue/babel-helper-vue-transform-on": "^1.0.2", - "camelcase": "^6.0.0", - "html-tags": "^3.1.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz", - "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==", - "dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "html-tags": "^2.0.0", - "lodash.kebabcase": "^4.1.1", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-preset-app": { - "version": "4.5.12", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz", - "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==", - "dependencies": { - "@babel/core": "^7.11.0", - "@babel/helper-compilation-targets": "^7.9.6", - "@babel/helper-module-imports": "^7.8.3", - "@babel/plugin-proposal-class-properties": "^7.8.3", - "@babel/plugin-proposal-decorators": "^7.8.3", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-jsx": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.11.0", - "@babel/preset-env": "^7.11.0", - "@babel/runtime": "^7.11.0", - "@vue/babel-plugin-jsx": "^1.0.3", - "@vue/babel-preset-jsx": "^1.2.4", - "babel-plugin-dynamic-import-node": "^2.3.3", - "core-js": "^3.6.5", - "core-js-compat": "^3.6.5", - "semver": "^6.1.0" - } - }, - "node_modules/@vue/babel-preset-jsx": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz", - "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==", - "dependencies": { - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "@vue/babel-sugar-composition-api-inject-h": "^1.2.1", - "@vue/babel-sugar-composition-api-render-instance": "^1.2.4", - "@vue/babel-sugar-functional-vue": "^1.2.2", - "@vue/babel-sugar-inject-h": "^1.2.2", - "@vue/babel-sugar-v-model": "^1.2.3", - "@vue/babel-sugar-v-on": "^1.2.3" - } - }, - "node_modules/@vue/babel-sugar-composition-api-inject-h": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz", - "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-composition-api-render-instance": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz", - "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==", - "dependencies": { - 
"@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-functional-vue": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz", - "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-inject-h": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz", - "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-v-model": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz", - "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0", - "html-tags": "^2.0.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-sugar-v-on": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz", - "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-on/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/component-compiler-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz", - "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==", - "dependencies": { - "consolidate": "^0.15.1", - "hash-sum": "^1.0.2", - "lru-cache": "^4.1.2", - "merge-source-map": "^1.1.0", - "postcss": "^7.0.14", - "postcss-selector-parser": "^6.0.2", - "prettier": "^1.18.2", - "source-map": "~0.6.1", - "vue-template-es2015-compiler": "^1.9.0" - }, - "optionalDependencies": { - "prettier": "^1.18.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - 
"dependencies": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=" - }, - "node_modules/@vuepress/core": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz", - "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==", - "dependencies": { - "@babel/core": "^7.8.4", - "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "1.8.2", - "@vuepress/markdown-loader": "1.8.2", - "@vuepress/plugin-last-updated": "1.8.2", - "@vuepress/plugin-register-components": "1.8.2", - "@vuepress/shared-utils": "1.8.2", - "autoprefixer": "^9.5.1", - "babel-loader": "^8.0.4", - "cache-loader": "^3.0.0", - "chokidar": "^2.0.3", - "connect-history-api-fallback": "^1.5.0", - "copy-webpack-plugin": "^5.0.2", - "core-js": "^3.6.4", - "cross-spawn": "^6.0.5", - "css-loader": "^2.1.1", - "file-loader": "^3.0.1", - "js-yaml": "^3.13.1", - "lru-cache": "^5.1.1", - "mini-css-extract-plugin": "0.6.0", - "optimize-css-assets-webpack-plugin": "^5.0.1", - "portfinder": "^1.0.13", - "postcss-loader": "^3.0.0", - "postcss-safe-parser": "^4.0.1", - "toml": "^3.0.0", - "url-loader": "^1.0.1", - "vue": "^2.6.10", - "vue-loader": "^15.7.1", - "vue-router": "^3.4.5", - "vue-server-renderer": "^2.6.10", - "vue-template-compiler": "^2.6.10", - "vuepress-html-webpack-plugin": "^3.2.0", - "vuepress-plugin-container": "^2.0.2", - "webpack": "^4.8.1", - "webpack-chain": "^6.0.0", - "webpack-dev-server": "^3.5.1", - "webpack-merge": "^4.1.2", - "webpackbar": "3.2.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/@vuepress/markdown": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz", - "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2", - "markdown-it": "^8.4.1", - "markdown-it-anchor": "^5.0.2", - "markdown-it-chain": "^1.3.0", - "markdown-it-emoji": "^1.4.0", - "markdown-it-table-of-contents": "^0.4.0", - "prismjs": "^1.13.0" - } - }, - "node_modules/@vuepress/markdown-loader": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz", - "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==", - "dependencies": { - "@vuepress/markdown": "1.8.2", - "loader-utils": "^1.1.0", - "lru-cache": "^5.1.1" - } - }, - "node_modules/@vuepress/markdown/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/@vuepress/markdown/node_modules/markdown-it": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz", - "integrity": 
"sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~1.1.1", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/@vuepress/plugin-active-header-links": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz", - "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==", - "dependencies": { - "lodash.debounce": "^4.0.8" - } - }, - "node_modules/@vuepress/plugin-google-analytics": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", - "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" - }, - "node_modules/@vuepress/plugin-last-updated": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz", - "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==", - "dependencies": { - "cross-spawn": "^6.0.5" - } - }, - "node_modules/@vuepress/plugin-nprogress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz", - "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==", - "dependencies": { - "nprogress": "^0.2.0" - } - }, - "node_modules/@vuepress/plugin-register-components": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz", - "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2" - } - }, - "node_modules/@vuepress/plugin-search": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz", - "integrity": "sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA==" - }, - "node_modules/@vuepress/shared-utils": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz", - "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==", - "dependencies": { - "chalk": "^2.3.2", - "escape-html": "^1.0.3", - "fs-extra": "^7.0.1", - "globby": "^9.2.0", - "gray-matter": "^4.0.1", - "hash-sum": "^1.0.2", - "semver": "^6.0.0", - "toml": "^3.0.0", - "upath": "^1.1.0" - } - }, - "node_modules/@vuepress/theme-default": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz", - "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==", - "dependencies": { - "@vuepress/plugin-active-header-links": "1.8.2", - "@vuepress/plugin-nprogress": "1.8.2", - "@vuepress/plugin-search": "1.8.2", - "docsearch.js": "^2.5.2", - "lodash": "^4.17.15", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "vuepress-plugin-container": "^2.0.2", - "vuepress-plugin-smooth-scroll": "^0.0.3" - } - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.9.0", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz", - "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", - "dependencies": { - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz", - "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", - "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz", - "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==" - }, - "node_modules/@webassemblyjs/helper-code-frame": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz", - "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==", - "dependencies": { - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-fsm": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz", - "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==" - }, - "node_modules/@webassemblyjs/helper-module-context": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz", - "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", - "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz", - "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz", - "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz", - 
"integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz", - "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz", - "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/helper-wasm-section": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-opt": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz", - "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz", - "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz", - "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wast-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz", - "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/floating-point-hex-parser": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-code-frame": "1.9.0", - "@webassemblyjs/helper-fsm": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", - "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": 
"1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" - }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" - }, - "node_modules/accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dependencies": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/agentkeepalive": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz", - "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "node_modules/ajv-errors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", - "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==" - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" - }, - "node_modules/algoliasearch": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.8.6.tgz", - "integrity": "sha512-G8IA3lcgaQB4r9HuQ4G+uSFjjz0Wv2OgEPiQ8emA+G2UUlroOfMl064j1bq/G+QTW0LmTQp9JwrFDRWxFM9J7w==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.8.6", - "@algolia/cache-common": "4.8.6", - "@algolia/cache-in-memory": "4.8.6", - "@algolia/client-account": "4.8.6", - "@algolia/client-analytics": "4.8.6", - "@algolia/client-common": "4.8.6", - "@algolia/client-recommendation": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/logger-console": "4.8.6", - "@algolia/requester-browser-xhr": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/requester-node-http": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/alphanum-sort": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=" - }, - "node_modules/ansi-align": { - 
"version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz", - "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", - "dependencies": { - "string-width": "^3.0.0" - } - }, - "node_modules/ansi-colors": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.4.tgz", - "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-html": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz", - "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4=", - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/anymatch": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", - "dependencies": { - "micromatch": "^3.1.4", - "normalize-path": "^2.1.1" - } - }, - "node_modules/anymatch/node_modules/normalize-path": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", - "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - "dependencies": { - "remove-trailing-separator": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-flatten": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-union": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" - }, - "node_modules/array-union": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", - "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", - "dependencies": { - "array-uniq": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-uniq": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", - "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/asap": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" - }, - "node_modules/asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/asn1.js": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", - "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", - "dependencies": { - "bn.js": "^4.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/asn1.js/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/assert": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", - "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", - "dependencies": { - "object-assign": "^4.1.1", - "util": "0.10.3" - } - }, - "node_modules/assert-never": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz", - "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw==" - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/assert/node_modules/inherits": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=" - }, - "node_modules/assert/node_modules/util": { - "version": "0.10.3", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", - "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", - "dependencies": { - "inherits": "2.0.1" - } - }, - "node_modules/assign-symbols": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "dependencies": { - "lodash": "^4.17.14" - } - }, - "node_modules/async-each": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", - "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==" - }, - "node_modules/async-limiter": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "node_modules/atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "bin": { - "atob": "bin/atob.js" - }, - "engines": { - "node": ">= 4.5.0" - } - }, - "node_modules/autocomplete.js": { - "version": "0.36.0", - "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz", - "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==", - "dependencies": { - "immediate": "^3.2.3" - } - }, - "node_modules/autoprefixer": { - "version": "9.8.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", - "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", - "dependencies": { - "browserslist": "^4.12.0", - "caniuse-lite": "^1.0.30001109", - "colorette": "^1.2.1", - "normalize-range": "^0.1.2", - "num2fraction": "^1.2.2", - "postcss": "^7.0.32", - "postcss-value-parser": "^4.1.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, - "node_modules/axios": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", - "dependencies": { - "follow-redirects": "^1.10.0" - } - }, - "node_modules/axios/node_modules/follow-redirects": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.3.tgz", - "integrity": "sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/babel-loader": { - "version": "8.2.2", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz", - "integrity": 
"sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==", - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": "^1.4.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz", - "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==", - "dependencies": { - "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.2.0", - "semver": "^6.1.1" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz", - "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0", - "core-js-compat": "^3.9.1" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz", - "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0" - } - }, - "node_modules/babel-walk": { - "version": "3.0.0-canary-5", - "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz", - "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==", - "dependencies": { - "@babel/types": "^7.9.6" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/base": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "dependencies": { - "cache-base": "^1.0.1", - "class-utils": "^0.3.5", - "component-emitter": "^1.2.1", - "define-property": "^1.0.0", - "isobject": "^3.0.1", - "mixin-deep": "^1.2.0", - "pascalcase": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": 
"sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=" - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", - "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "optional": true, - "dependencies": { - "file-uri-to-path": "1.0.0" - } - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" - }, - "node_modules/bn.js": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz", - "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw==" - }, - "node_modules/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "dependencies": { - "bytes": "3.1.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - 
"engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/bonjour": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz", - "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=", - "dependencies": { - "array-flatten": "^2.1.0", - "deep-equal": "^1.0.1", - "dns-equal": "^1.0.0", - "dns-txt": "^2.0.2", - "multicast-dns": "^6.0.1", - "multicast-dns-service-types": "^1.1.0" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" - }, - "node_modules/boxen": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz", - "integrity": "sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==", - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "cli-boxes": "^2.2.0", - "string-width": "^4.1.0", - "term-size": "^2.1.0", - "type-fest": "^0.8.1", - "widest-line": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/boxen/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/boxen/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - 
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/boxen/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/boxen/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dependencies": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": 
"sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, - "node_modules/browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "dependencies": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/browserify-cipher": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", - "dependencies": { - "browserify-aes": "^1.0.4", - "browserify-des": "^1.0.0", - "evp_bytestokey": "^1.0.0" - } - }, - "node_modules/browserify-des": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", - "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", - "dependencies": { - "cipher-base": "^1.0.1", - "des.js": "^1.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/browserify-rsa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", - "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", - "dependencies": { - "bn.js": "^5.0.0", - "randombytes": "^2.0.1" - } - }, - "node_modules/browserify-sign": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", - "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", - "dependencies": { - "bn.js": "^5.1.1", - "browserify-rsa": "^4.0.1", - "create-hash": "^1.2.0", - "create-hmac": "^1.1.7", - "elliptic": "^6.5.3", - "inherits": "^2.0.4", - "parse-asn1": "^5.1.5", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - } - }, - "node_modules/browserify-sign/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/browserify-sign/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/browserify-zlib": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "dependencies": { - "pako": "~1.0.5" - } - }, - "node_modules/browserslist": { - "version": "4.16.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", - "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", - "dependencies": { - "caniuse-lite": "^1.0.30001219", - "colorette": "^1.2.2", - "electron-to-chromium": "^1.3.723", - "escalade": "^3.1.1", - "node-releases": 
"^1.1.71" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/browserslist/node_modules/caniuse-lite": { - "version": "1.0.30001228", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", - "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==" - }, - "node_modules/browserslist/node_modules/electron-to-chromium": { - "version": "1.3.738", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.738.tgz", - "integrity": "sha512-vCMf4gDOpEylPSLPLSwAEsz+R3ShP02Y3cAKMZvTqule3XcPp7tgc/0ESI7IS6ZeyBlGClE50N53fIOkcIVnpw==" - }, - "node_modules/buffer": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", - "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", - "dependencies": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4", - "isarray": "^1.0.0" - } - }, - "node_modules/buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" - }, - "node_modules/buffer-indexof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz", - "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g==" - }, - "node_modules/buffer-json": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/buffer-json/-/buffer-json-2.0.0.tgz", - "integrity": "sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw==" - }, - "node_modules/buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" - }, - "node_modules/builtin-status-codes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" - }, - "node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.2", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.2.tgz", - "integrity": "sha512-w0bH1IF9rEjdi0a6lTtlXYT+vBZEJL9oytaXXRdsD68MH6+SrZGOGsu7s2saHQvYXqwo/wBdkW75tt8wFpj+mw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacache": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz", - "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", - "dependencies": { - "bluebird": "^3.5.5", - "chownr": "^1.1.1", - "figgy-pudding": "^3.5.1", - "glob": "^7.1.4", - "graceful-fs": "^4.1.15", - "infer-owner": "^1.0.3", - "lru-cache": "^5.1.1", - "mississippi": "^3.0.0", - "mkdirp": "^0.5.1", - "move-concurrently": "^1.0.1", - "promise-inflight": "^1.0.1", - "rimraf": "^2.6.3", - "ssri": "^6.0.1", - "unique-filename": "^1.1.1", - "y18n": "^4.0.0" - } - }, - "node_modules/cacache/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": 
"sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-base": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", - "dependencies": { - "collection-visit": "^1.0.0", - "component-emitter": "^1.2.1", - "get-value": "^2.0.6", - "has-value": "^1.0.0", - "isobject": "^3.0.1", - "set-value": "^2.0.0", - "to-object-path": "^0.3.0", - "union-value": "^1.0.0", - "unset-value": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cache-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/cache-loader/-/cache-loader-3.0.1.tgz", - "integrity": "sha512-HzJIvGiGqYsFUrMjAJNDbVZoG7qQA+vy9AIoKs7s9DscNfki0I589mf2w6/tW+kkFH3zyiknoWV5Jdynu6b/zw==", - "dependencies": { - "buffer-json": "^2.0.0", - "find-cache-dir": "^2.1.0", - "loader-utils": "^1.2.3", - "mkdirp": "^0.5.1", - "neo-async": "^2.6.1", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/cache-loader/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-loader/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/path-exists": { - "version": "3.0.0", - 
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/cache-loader/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/cache-loader/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/normalize-url": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", - "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "node_modules/call-me-maybe": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", - "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" - }, - "node_modules/caller-callsite": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", - "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=", - "dependencies": { - "callsites": "^2.0.0" - }, - 
"engines": { - "node": ">=4" - } - }, - "node_modules/caller-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", - "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=", - "dependencies": { - "caller-callsite": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/callsites": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", - "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=", - "engines": { - "node": ">=4" - } - }, - "node_modules/camel-case": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz", - "integrity": "sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=", - "dependencies": { - "no-case": "^2.2.0", - "upper-case": "^1.1.1" - } - }, - "node_modules/camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": "sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/character-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz", - "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=", - "dependencies": { - "is-regex": "^1.0.3" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.6", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz", - "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==", - "dependencies": { - "cheerio-select": "^1.3.0", - "dom-serializer": "^1.3.1", - "domhandler": "^4.1.0", - "htmlparser2": "^6.1.0", - "parse5": "^6.0.1", - "parse5-htmlparser2-tree-adapter": "^6.0.1" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/cheerio-select": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.3.0.tgz", - "integrity": "sha512-mLgqdHxVOQyhOIkG5QnRkDg7h817Dkf0dAvlCio2TJMmR72cJKH0bF28SHXvLkVrGcGOiub0/Bs/CMnPeQO7qw==", - "dependencies": { - "css-select": "^4.0.0", - "css-what": "^5.0.0", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.2" - } - }, - "node_modules/chokidar": { - "version": "2.1.8", - "resolved": 
"https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", - "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", - "dependencies": { - "anymatch": "^2.0.0", - "async-each": "^1.0.1", - "braces": "^2.3.2", - "fsevents": "^1.2.7", - "glob-parent": "^3.1.0", - "inherits": "^2.0.3", - "is-binary-path": "^1.0.0", - "is-glob": "^4.0.0", - "normalize-path": "^3.0.0", - "path-is-absolute": "^1.0.0", - "readdirp": "^2.2.1", - "upath": "^1.1.1" - }, - "optionalDependencies": { - "fsevents": "^1.2.7" - } - }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" - }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz", - "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ==" - }, - "node_modules/cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/class-utils": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", - "dependencies": { - "arr-union": "^3.1.0", - "define-property": "^0.2.5", - "isobject": "^3.0.0", - "static-extend": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/class-utils/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/clean-css": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz", - "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/clipboard": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz", - "integrity": 
"sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==", - "optional": true, - "dependencies": { - "good-listener": "^1.2.2", - "select": "^1.1.2", - "tiny-emitter": "^2.0.0" - } - }, - "node_modules/clipboard-copy": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", - "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" - }, - "node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dependencies": { - "mimic-response": "^1.0.0" - } - }, - "node_modules/coa": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", - "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", - "dependencies": { - "@types/q": "^1.5.1", - "chalk": "^2.4.1", - "q": "^1.1.2" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/collection-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", - "dependencies": { - "map-visit": "^1.0.0", - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/color": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", - "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", - "dependencies": { - "color-convert": "^1.9.1", - "color-string": "^1.5.4" - } - }, - "node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, 
- "node_modules/colorette": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", - "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "2.17.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz", - "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==" - }, - "node_modules/commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" - }, - "node_modules/component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "node_modules/concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "engines": [ - "node >= 0.8" - ], - "dependencies": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "node_modules/configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/connect-history-api-fallback": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz", - "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" - }, - "node_modules/console-browserify": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" - }, - "node_modules/consolidate": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz", - "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==", - "dependencies": { - "bluebird": "^3.1.1" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/constantinople": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz", - "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==", - "dependencies": { - "@babel/parser": "^7.6.0", - "@babel/types": "^7.6.1" - } - }, - "node_modules/constants-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" - }, - "node_modules/content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", - "dependencies": { - "safe-buffer": "5.1.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dependencies": { - "safe-buffer": "~5.1.1" - } - }, - "node_modules/cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "node_modules/copy-concurrently": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", - "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", - "dependencies": { - "aproba": "^1.1.1", - 
"fs-write-stream-atomic": "^1.0.8", - "iferr": "^0.1.5", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.0" - } - }, - "node_modules/copy-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/copy-descriptor": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz", - "integrity": "sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==", - "dependencies": { - "cacache": "^12.0.3", - "find-cache-dir": "^2.1.0", - "glob-parent": "^3.1.0", - "globby": "^7.1.1", - "is-glob": "^4.0.1", - "loader-utils": "^1.2.3", - "minimatch": "^3.0.4", - "normalize-path": "^3.0.0", - "p-limit": "^2.2.1", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz", - "integrity": "sha1-+yzP+UAfhgCUXfral0QMypcrhoA=", - "dependencies": { - "array-union": "^1.0.1", - "dir-glob": "^2.0.0", - "glob": "^7.1.2", - "ignore": "^3.3.5", - "pify": "^3.0.0", - "slash": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ignore": { - "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" - }, - "node_modules/copy-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/copy-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/core-js": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.10.1.tgz", - "integrity": "sha512-pwCxEXnj27XG47mu7SXAwhLP3L5CrlvCB91ANUkIz40P27kUcvNfSdvyZJ9CLHiVoKSp+TTChMQMSKQEH/IQxA==", - "hasInstallScript": true - }, - "node_modules/core-js-compat": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.10.1.tgz", - "integrity": "sha512-ZHQTdTPkqvw2CeHiZC970NNJcnwzT6YIueDMASKt+p3WbZsLXOcoD392SkcWhkC0wBBHhlfhqGKKsNCQUozYtg==", - "dependencies": { - "browserslist": "^4.16.3", - "semver": "7.0.0" - } - }, - "node_modules/core-js-compat/node_modules/semver": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", - "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "node_modules/cosmiconfig": { - "version": "5.2.1", - "resolved": 
"https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", - "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", - "dependencies": { - "import-fresh": "^2.0.0", - "is-directory": "^0.3.1", - "js-yaml": "^3.13.1", - "parse-json": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/create-ecdh": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", - "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", - "dependencies": { - "bn.js": "^4.1.0", - "elliptic": "^6.5.3" - } - }, - "node_modules/create-ecdh/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "dependencies": { - "cipher-base": "^1.0.1", - "inherits": "^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "node_modules/create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "dependencies": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "node_modules/cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dependencies": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "engines": { - "node": ">=4.8" - } - }, - "node_modules/cross-spawn/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/crypto-browserify": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", - "dependencies": { - "browserify-cipher": "^1.0.0", - "browserify-sign": "^4.0.0", - "create-ecdh": "^4.0.0", - "create-hash": "^1.1.0", - "create-hmac": "^1.1.0", - "diffie-hellman": "^5.0.0", - "inherits": "^2.0.1", - "pbkdf2": "^3.0.3", - "public-encrypt": "^4.0.0", - "randombytes": "^2.0.0", - "randomfill": "^1.0.3" - }, - "engines": { - "node": "*" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/css": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz", 
- "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==", - "dependencies": { - "inherits": "^2.0.3", - "source-map": "^0.6.1", - "source-map-resolve": "^0.5.2", - "urix": "^0.1.0" - } - }, - "node_modules/css-color-names": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=", - "engines": { - "node": "*" - } - }, - "node_modules/css-declaration-sorter": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", - "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", - "dependencies": { - "postcss": "^7.0.1", - "timsort": "^0.3.0" - }, - "engines": { - "node": ">4" - } - }, - "node_modules/css-loader": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-2.1.1.tgz", - "integrity": "sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==", - "dependencies": { - "camelcase": "^5.2.0", - "icss-utils": "^4.1.0", - "loader-utils": "^1.2.3", - "normalize-path": "^3.0.0", - "postcss": "^7.0.14", - "postcss-modules-extract-imports": "^2.0.0", - "postcss-modules-local-by-default": "^2.0.6", - "postcss-modules-scope": "^2.1.0", - "postcss-modules-values": "^2.0.0", - "postcss-value-parser": "^3.3.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/css-loader/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/css-loader/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/css-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/css-parse": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/css-parse/-/css-parse-2.0.0.tgz", - "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=", - "dependencies": { - "css": "^2.0.0" - } - }, - "node_modules/css-select": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.0.0.tgz", - "integrity": "sha512-I7favumBlDP/nuHBKLfL5RqvlvRdn/W29evvWJ+TaoGPm7QD+xSIN5eY2dyGjtkUmemh02TZrqJb4B8DWo6PoQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^5.0.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.1", - "nth-check": "^2.0.0" - } - }, - "node_modules/css-select-base-adapter": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", - "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" - }, - "node_modules/css-tree": { - "version": "1.0.0-alpha.37", - 
"resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", - "dependencies": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-tree/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/css-what": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz", - "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "4.1.11", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz", - "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "cssnano-preset-default": "^4.0.8", - "is-resolvable": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-preset-default": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz", - "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==", - "dependencies": { - "css-declaration-sorter": "^4.0.1", - "cssnano-util-raw-cache": "^4.0.1", - "postcss": "^7.0.0", - "postcss-calc": "^7.0.1", - "postcss-colormin": "^4.0.3", - "postcss-convert-values": "^4.0.1", - "postcss-discard-comments": "^4.0.2", - "postcss-discard-duplicates": "^4.0.2", - "postcss-discard-empty": "^4.0.1", - "postcss-discard-overridden": "^4.0.1", - "postcss-merge-longhand": "^4.0.11", - "postcss-merge-rules": "^4.0.3", - "postcss-minify-font-values": "^4.0.2", - "postcss-minify-gradients": "^4.0.2", - "postcss-minify-params": "^4.0.2", - "postcss-minify-selectors": "^4.0.2", - "postcss-normalize-charset": "^4.0.1", - "postcss-normalize-display-values": "^4.0.2", - "postcss-normalize-positions": "^4.0.2", - "postcss-normalize-repeat-style": "^4.0.2", - "postcss-normalize-string": "^4.0.2", - "postcss-normalize-timing-functions": "^4.0.2", - "postcss-normalize-unicode": "^4.0.1", - "postcss-normalize-url": "^4.0.1", - "postcss-normalize-whitespace": "^4.0.2", - "postcss-ordered-values": "^4.1.2", - "postcss-reduce-initial": "^4.0.3", - "postcss-reduce-transforms": "^4.0.2", - "postcss-svgo": "^4.0.3", - "postcss-unique-selectors": "^4.0.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-arguments": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-match": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-raw-cache": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", - "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-same-parent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", - "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" - }, - "node_modules/csso/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cyclist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", - "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=" - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/de-indent": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", - "integrity": "sha1-sgOOhG3DO6pXlhKNCAS0VbjB4h0=" - }, - "node_modules/debug": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "engines": { - "node": ">=0.10.0" - 
} - }, - "node_modules/decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-equal": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz", - "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==", - "dependencies": { - "is-arguments": "^1.0.4", - "is-date-object": "^1.0.1", - "is-regex": "^1.0.4", - "object-is": "^1.0.1", - "object-keys": "^1.1.1", - "regexp.prototype.flags": "^1.2.0" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-1.5.2.tgz", - "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.2.0.tgz", - "integrity": "sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==", - "dependencies": { - "execa": "^1.0.0", - "ip-regex": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" - }, - "node_modules/define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dependencies": { - "object-keys": "^1.0.12" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/define-property": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", - "dependencies": { - "is-descriptor": "^1.0.2", - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-4.1.1.tgz", - "integrity": "sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ==", - "dependencies": { - "@types/glob": "^7.1.1", - "globby": "^6.1.0", - "is-path-cwd": "^2.0.0", - "is-path-in-cwd": "^2.0.0", - "p-map": "^2.0.0", - "pify": "^4.0.1", - "rimraf": "^2.6.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/del/node_modules/globby": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz", - "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=", - "dependencies": { - "array-union": "^1.0.1", - "glob": "^7.0.3", - "object-assign": "^4.0.1", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del/node_modules/globby/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/delegate": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz", - "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==", - "optional": true - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/des.js": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", - "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", - "dependencies": { - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "node_modules/detect-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz", - "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw==" - }, - "node_modules/diffie-hellman": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", - "dependencies": 
{ - "bn.js": "^4.1.0", - "miller-rabin": "^4.0.0", - "randombytes": "^2.0.0" - } - }, - "node_modules/diffie-hellman/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/dir-glob": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz", - "integrity": "sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==", - "dependencies": { - "path-type": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0=" - }, - "node_modules/dns-packet": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.4.tgz", - "integrity": "sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==", - "dependencies": { - "ip": "^1.1.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/dns-txt": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz", - "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=", - "dependencies": { - "buffer-indexof": "^1.0.0" - } - }, - "node_modules/docsearch.js": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz", - "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==", - "dependencies": { - "algoliasearch": "^3.24.5", - "autocomplete.js": "0.36.0", - "hogan.js": "^3.0.2", - "request": "^2.87.0", - "stack-utils": "^1.0.1", - "to-factory": "^1.0.0", - "zepto": "^1.2.0" - } - }, - "node_modules/docsearch.js/node_modules/algoliasearch": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", - "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", - "dependencies": { - "agentkeepalive": "^2.2.0", - "debug": "^2.6.9", - "envify": "^4.0.0", - "es6-promise": "^4.1.0", - "events": "^1.1.0", - "foreach": "^2.0.5", - "global": "^4.3.2", - "inherits": "^2.0.1", - "isarray": "^2.0.1", - "load-script": "^1.0.0", - "object-keys": "^1.0.11", - "querystring-es3": "^0.2.1", - "reduce": "^1.0.1", - "semver": "^5.1.0", - "tunnel-agent": "^0.6.0" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/docsearch.js/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/docsearch.js/node_modules/events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/docsearch.js/node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" - }, - "node_modules/docsearch.js/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - 
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/doctypes": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz", - "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk=" - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz", - "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "entities": "^2.0.0" - } - }, - "node_modules/dom-walk": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", - "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" - }, - "node_modules/domain-browser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", - "engines": { - "node": ">=0.4", - "npm": ">=1.2" - } - }, - "node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/domhandler": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.1.0.tgz", - "integrity": "sha512-/6/kmsGlMY4Tup/nGVutdrK9yQi4YjWVcVeoQmixpzjOUK1U7pQkvAPHBJeUxOgxF0J8f8lwCJSlCfD0V4CMGQ==", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/domutils": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.5.2.tgz", - "integrity": "sha512-MHTthCb1zj8f1GVfRpeZUbohQf/HdBos0oX5gZcQFepOZPLLRyj6Wn7XS7EMnY7CVpwv8863u2vyE83Hfu28HQ==", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "node_modules/duplexify": { - "version": "3.7.1", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", - "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", - "dependencies": { - "end-of-stream": "^1.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.0.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": 
"sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "node_modules/elliptic": { - "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", - "dependencies": { - "bn.js": "^4.11.9", - "brorand": "^1.1.0", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.1", - "inherits": "^2.0.4", - "minimalistic-assert": "^1.0.1", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/elliptic/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enhanced-resolve": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz", - "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==", - "dependencies": { - "graceful-fs": "^4.1.2", - "memory-fs": "^0.5.0", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/enhanced-resolve/node_modules/memory-fs": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", - "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - }, - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" - }, - "node_modules/envify": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz", - "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==", - "dependencies": { - "esprima": "^4.0.0", - "through": "~2.3.4" - }, - "bin": { - "envify": "bin/envify" - } - }, - "node_modules/envinfo": { - "version": "7.8.1", - "resolved": 
"https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", - "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", - "bin": { - "envinfo": "dist/cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/errno": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", - "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", - "dependencies": { - "prr": "~1.0.1" - }, - "bin": { - "errno": "cli.js" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", - "dependencies": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "get-intrinsic": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", - "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.2", - "string.prototype.trimend": "^1.0.4", - "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eslint-scope": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", - "integrity": 
"sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", - "dependencies": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/esm": { - "version": "3.2.25", - "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz", - "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", - "dependencies": { - "original": "^1.0.0" - }, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "dependencies": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/execa": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "dependencies": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/expand-brackets": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dependencies": { - "debug": "^2.3.3", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "posix-character-classes": "^0.1.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expand-brackets/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/expand-brackets/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", - "dependencies": { - "accepts": "~1.3.7", - "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", - "content-type": "~1.0.4", - "cookie": "0.4.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dependencies": { - "is-extendable": 
"^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dependencies": { - "array-unique": "^0.3.2", - "define-property": "^1.0.0", - "expand-brackets": "^2.1.4", - "extend-shallow": "^2.0.1", - "fragment-cache": "^0.2.1", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-glob": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", - "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==", - "dependencies": { - "@mrmlnc/readdir-enhanced": "^2.2.1", - "@nodelib/fs.stat": "^1.1.2", - "glob-parent": "^3.1.0", - "is-glob": "^4.0.0", - "merge2": "^1.2.3", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/faye-websocket": { - "version": "0.11.3", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz", - "integrity": 
"sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/figgy-pudding": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", - "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==" - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/file-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-3.0.1.tgz", - "integrity": "sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==", - "dependencies": { - "loader-utils": "^1.0.2", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "optional": true - }, - "node_modules/fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/find-cache-dir": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", - "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": 
"sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/flush-write-stream": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", - "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", - "dependencies": { - "inherits": "^2.0.3", - "readable-stream": "^2.3.6" - } - }, - "node_modules/follow-redirects": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz", - "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==", - "dependencies": { - "debug": "=3.1.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/foreach": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", - "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=" - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fragment-cache": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", - "dependencies": { - "map-cache": "^0.2.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", - "dependencies": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - } - }, - "node_modules/fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "dependencies": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-write-stream-atomic": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", - "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", - "dependencies": { - 
"graceful-fs": "^4.1.2", - "iferr": "^0.1.5", - "imurmurhash": "^0.1.4", - "readable-stream": "1 || 2" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fsevents": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", - "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "bindings": "^1.5.0", - "nan": "^2.12.1" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "node_modules/fuse.js": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", - "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - } - }, - "node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/get-value": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/glob-parent": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", - "dependencies": { - "is-glob": "^3.1.0", - "path-dirname": "^1.0.0" - } - }, - "node_modules/glob-parent/node_modules/is-glob": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", - "dependencies": { - "is-extglob": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=" - }, - "node_modules/global": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz", - "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==", - "dependencies": { - "min-document": "^2.19.0", - "process": "^0.11.10" - } - }, - "node_modules/global-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz", - "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==", - "dependencies": { - "ini": "1.3.7" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-9.2.0.tgz", - "integrity": "sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg==", - "dependencies": { - "@types/glob": "^7.1.1", - "array-union": "^1.0.2", - "dir-glob": "^2.2.2", - "fast-glob": "^2.2.6", - "glob": "^7.1.3", - "ignore": "^4.0.3", - "pify": "^4.0.1", - "slash": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/good-listener": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=", - "optional": true, - "dependencies": { - "delegate": "^3.1.2" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" - }, - "node_modules/gray-matter": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.2.tgz", - "integrity": "sha512-7hB/+LxrOjq/dd8APlK0r24uL/67w7SkYnfwhNFwg/VDIGWGmduTDYf3WNstLW2fbbmRwrDGCVSJ2isuf2+4Hw==", - "dependencies": { - "js-yaml": "^3.11.0", - "kind-of": 
"^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==" - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/has-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", - "dependencies": { - "get-value": "^2.0.6", - "has-values": "^1.0.0", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", - "dependencies": { - "is-number": "^3.0.0", - "kind-of": "^4.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values/node_modules/kind-of": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/hash-base": { - "version": 
"3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "dependencies": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/hash-base/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/hash-base/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/hash-sum": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/hash-sum/-/hash-sum-1.0.2.tgz", - "integrity": "sha1-M7QHd3VMZDJXPBIMw4CLvRDUfwQ=" - }, - "node_modules/hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "dependencies": { - "inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "bin": { - "he": "bin/he" - } - }, - "node_modules/hex-color-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" - }, - "node_modules/hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "dependencies": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/hogan.js": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz", - "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=", - "dependencies": { - "mkdirp": "0.3.0", - "nopt": "1.0.10" - }, - "bin": { - "hulk": "bin/hulk" - } - }, - "node_modules/hogan.js/node_modules/mkdirp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz", - "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=", - "engines": { - "node": "*" - } - }, - "node_modules/hotkeys-js": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz", - "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw==" - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hsl-regex": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=" - }, - "node_modules/hsla-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=" - }, - "node_modules/html-entities": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.4.0.tgz", - "integrity": "sha512-8nxjcBcd8wovbeKx7h3wTji4e6+rhaVuPNpMqwWgnHh+N9ToqsCs6XztWRBPQ+UtzsoMAdKZtUENoVzU/EMtZA==" - }, - "node_modules/html-minifier": { - "version": "3.5.21", - "resolved": "https://registry.npmjs.org/html-minifier/-/html-minifier-3.5.21.tgz", - "integrity": "sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==", - "dependencies": { - "camel-case": "3.0.x", - "clean-css": "4.2.x", - "commander": "2.17.x", - "he": "1.2.x", - "param-case": "2.1.x", - "relateurl": "0.2.x", - "uglify-js": "3.4.x" - }, - "bin": { - "html-minifier": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/html-tags": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", - "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=" - }, - "node_modules/http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-errors/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/http-parser-js": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz", - "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg==" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - 
"node_modules/http-proxy-middleware": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", - "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", - "dependencies": { - "http-proxy": "^1.17.0", - "is-glob": "^4.0.0", - "lodash": "^4.17.11", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - "node_modules/https-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=" - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-replace-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz", - "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=" - }, - "node_modules/icss-utils": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz", - "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==", - "dependencies": { - "postcss": "^7.0.14" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" - }, - "node_modules/iferr": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz", - "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=" - }, - "node_modules/ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, - "node_modules/import-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz", - "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=", - "dependencies": { - "import-from": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-fresh": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", - "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=", - "dependencies": { - "caller-path": "^2.0.0", - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-from": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz", - 
"integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz", - "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==", - "dependencies": { - "pkg-dir": "^3.0.0", - "resolve-cwd": "^2.0.0" - }, - "bin": { - "import-local-fixture": "fixtures/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indexes-of": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=" - }, - "node_modules/infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==" - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==" - }, - "node_modules/internal-ip": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.3.0.tgz", - "integrity": "sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==", - "dependencies": { - "default-gateway": "^4.2.0", - "ipaddr.js": "^1.9.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/ip": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", - "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=" - }, - "node_modules/ip-regex": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz", - "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=", - "engines": { - "node": ">=4" - } - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-absolute-url": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-arguments": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz", - "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" - }, - "node_modules/is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" - }, - "node_modules/is-binary-path": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", - "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", - "dependencies": { - "binary-extensions": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": 
"sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "node_modules/is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-ci/node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" - }, - "node_modules/is-color-stop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", - "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=", - "dependencies": { - "css-color-names": "^0.0.4", - "hex-color-regex": "^1.1.0", - "hsl-regex": "^1.0.0", - "hsla-regex": "^1.0.0", - "rgb-regex": "^1.0.1", - "rgba-regex": "^1.0.0" - } - }, - "node_modules/is-core-module": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", - "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", - "dependencies": { - "has": "^1.0.3" - } - }, - "node_modules/is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-data-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dependencies": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-descriptor/node_modules/kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": 
"sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-directory": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-expression": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz", - "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==", - "dependencies": { - "acorn": "^7.1.1", - "object-assign": "^4.1.1" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dependencies": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-installed-globally/node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-npm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz", - "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-number-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": 
"sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-number/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-in-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz", - "integrity": "sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ==", - "dependencies": { - "is-path-inside": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-2.1.0.tgz", - "integrity": "sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg==", - "dependencies": { - "path-is-inside": "^1.0.2" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, - "node_modules/is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", - "dependencies": { - "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-resolvable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" - }, - "node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": 
"sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dependencies": { - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "node_modules/is-windows": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-wsl": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" - }, - "node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "node_modules/javascript-stringify": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz", - "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" - }, - "node_modules/js-base64": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz", - "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ==" - }, - "node_modules/js-stringify": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz", - "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds=" - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "node_modules/json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" - }, - "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "node_modules/json3": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.3.tgz", - "integrity": "sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA==" - }, - "node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "dependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz", - "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=", - "dependencies": { - "debug": "^2.1.3" - } - }, - "node_modules/jsonp/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "node_modules/jstransformer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz", - "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=", - "dependencies": { - "is-promise": "^2.0.0", - "promise": "^7.0.1" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/killable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/killable/-/killable-1.0.1.tgz", - "integrity": "sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg==" - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/last-call-webpack-plugin": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz", - "integrity": "sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==", - "dependencies": { - "lodash": "^4.17.5", - "webpack-sources": "^1.1.0" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/linkify-it": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", - "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/load-script": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=" - }, - "node_modules/loader-runner": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", - "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/loader-utils": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", - "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^1.0.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash._reinterpolate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=" - }, - "node_modules/lodash.chunk": { - "version": "4.2.0", - "resolved": 
"https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", - "integrity": "sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw=" - }, - "node_modules/lodash.clonedeep": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", - "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" - }, - "node_modules/lodash.kebabcase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", - "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY=" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=" - }, - "node_modules/lodash.padstart": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz", - "integrity": "sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs=" - }, - "node_modules/lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=" - }, - "node_modules/lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "node_modules/lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" - }, - "node_modules/loglevel": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", - "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/lower-case": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", - "integrity": "sha1-miyr0bno4K6ZOkv31YdcOcQujqw=" - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - 
"dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/map-cache": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/map-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", - "dependencies": { - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/markdown-it": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.4.tgz", - "integrity": "sha512-34RwOXZT8kyuOJy25oJNJoulO8L0bTHYWXcdZBYZqFnjIy3NgjeoM3FmPXIOFQ26/lSHYMr8oc62B6adxXcb3Q==", - "dependencies": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/markdown-it-anchor": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", - "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==" - }, - "node_modules/markdown-it-attrs": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz", - "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/markdown-it-chain": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-chain/-/markdown-it-chain-1.3.0.tgz", - "integrity": "sha512-XClV8I1TKy8L2qsT9iX3qiV+50ZtcInGXI80CA+DP62sMs7hXlyV/RM3hfwy5O3Ad0sJm9xIwQELgANfESo8mQ==", - "dependencies": { - "webpack-chain": "^4.9.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/markdown-it-chain/node_modules/webpack-chain": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-4.12.1.tgz", - "integrity": "sha512-BCfKo2YkDe2ByqkEWe1Rw+zko4LsyS75LVr29C6xIrxAg9JHJ4pl8kaIZ396SUSNp6b4815dRZPSTAS8LlURRQ==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^1.6.0" - } - }, - "node_modules/markdown-it-container": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-it-container/-/markdown-it-container-2.0.0.tgz", - "integrity": "sha1-ABm0P9Au7+zi8ZYKKJX7qBpARpU=" - }, - "node_modules/markdown-it-emoji": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz", - "integrity": "sha1-m+4OmpkKljupbfaYDE/dsF37Tcw=" - }, - "node_modules/markdown-it-table-of-contents": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.4.4.tgz", - "integrity": "sha512-TAIHTHPwa9+ltKvKPWulm/beozQU41Ab+FIefRaQV1NRnpzwcV9QOe6wXQS5WLivm5Q/nlo0rl6laGkMDZE7Gw==", - "engines": { - "node": ">6.4.0" - } - }, - "node_modules/markdown-it/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/markdown-it/node_modules/linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", - 
"integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/mdn-data": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" - }, - "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memory-fs": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", - "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "node_modules/merge-source-map": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz", - "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", - "dependencies": { - "source-map": "^0.6.1" - } - }, - "node_modules/merge-source-map/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/micromatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": 
"https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/micromatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/miller-rabin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", - "dependencies": { - "bn.js": "^4.0.0", - "brorand": "^1.0.1" - }, - "bin": { - "miller-rabin": "bin/miller-rabin" - } - }, - "node_modules/miller-rabin/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/mime": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", - "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", - "dependencies": { - "mime-db": "1.47.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=", - "dependencies": { - "dom-walk": "^0.1.0" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.6.0.tgz", - "integrity": "sha512-79q5P7YGI6rdnVyIAV4NXpBQJFWdkzJxCim3Kog4078fM0piAaFlwocqbejdWtLW1cEzCexPrh6EdyFsPgVdAw==", - "dependencies": { - "loader-utils": "^1.1.0", - "normalize-url": "^2.0.1", - "schema-utils": "^1.0.0", - "webpack-sources": "^1.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", 
- "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "node_modules/minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" - }, - "node_modules/mississippi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz", - "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", - "dependencies": { - "concat-stream": "^1.5.0", - "duplexify": "^3.4.2", - "end-of-stream": "^1.1.0", - "flush-write-stream": "^1.0.0", - "from2": "^2.1.0", - "parallel-transform": "^1.1.0", - "pump": "^3.0.0", - "pumpify": "^1.3.3", - "stream-each": "^1.1.0", - "through2": "^2.0.0" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mixin-deep": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", - "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", - "dependencies": { - "for-in": "^1.0.2", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mixin-deep/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/move-concurrently": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", - "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=", - "dependencies": { - "aproba": "^1.1.1", - "copy-concurrently": "^1.0.0", - "fs-write-stream-atomic": "^1.0.8", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.3" - } - }, - "node_modules/move-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - 
"node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/multicast-dns": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz", - "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==", - "dependencies": { - "dns-packet": "^1.3.1", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/multicast-dns-service-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz", - "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" - }, - "node_modules/nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "optional": true - }, - "node_modules/nanomatch": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "fragment-cache": "^0.2.1", - "is-windows": "^1.0.2", - "kind-of": "^6.0.2", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" - }, - "node_modules/nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node_modules/no-case": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz", - "integrity": "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==", - "dependencies": { - "lower-case": "^1.1.1" - } - }, - "node_modules/node-forge": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", - 
"integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==", - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/node-libs-browser": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz", - "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", - "dependencies": { - "assert": "^1.1.1", - "browserify-zlib": "^0.2.0", - "buffer": "^4.3.0", - "console-browserify": "^1.1.0", - "constants-browserify": "^1.0.0", - "crypto-browserify": "^3.11.0", - "domain-browser": "^1.1.1", - "events": "^3.0.0", - "https-browserify": "^1.0.0", - "os-browserify": "^0.3.0", - "path-browserify": "0.0.1", - "process": "^0.11.10", - "punycode": "^1.2.4", - "querystring-es3": "^0.2.0", - "readable-stream": "^2.3.3", - "stream-browserify": "^2.0.1", - "stream-http": "^2.7.2", - "string_decoder": "^1.0.0", - "timers-browserify": "^2.0.4", - "tty-browserify": "0.0.0", - "url": "^0.11.0", - "util": "^0.11.0", - "vm-browserify": "^1.0.1" - } - }, - "node_modules/node-libs-browser/node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - }, - "node_modules/node-releases": { - "version": "1.1.71", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz", - "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg==" - }, - "node_modules/nopt": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", - "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "dependencies": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "dependencies": { - "path-key": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E=" - }, - "node_modules/nth-check": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz", - "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==", - "dependencies": { - "boolbase": "^1.0.0" - } - }, - 
"node_modules/num2fraction": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", - "dependencies": { - "copy-descriptor": "^0.1.0", - "define-property": "^0.2.5", - "kind-of": "^3.0.3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.9.0.tgz", - "integrity": "sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw==" - }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-visit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", - "dependencies": { - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": 
"sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/object.pick": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" - }, - "node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/opencollective-postinstall": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz", - "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==", - "bin": { - "opencollective-postinstall": "index.js" - } - }, - "node_modules/opn": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz", - "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==", - "dependencies": { - "is-wsl": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/optimize-css-assets-webpack-plugin": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz", - "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==", - "dependencies": { - "cssnano": "^4.1.10", - "last-call-webpack-plugin": "^3.0.0" - } - }, - "node_modules/original": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/original/-/original-1.0.2.tgz", - "integrity": "sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==", - "dependencies": { - "url-parse": "^1.4.3" - } - }, - "node_modules/os-browserify": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" - }, - 
"node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-retry": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-3.0.1.tgz", - "integrity": "sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w==", - "dependencies": { - "retry": "^0.12.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "node_modules/parallel-transform": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz", - "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", - "dependencies": { - "cyclist": "^1.0.1", - "inherits": "^2.0.3", - "readable-stream": "^2.1.5" - } - }, - "node_modules/param-case": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz", - "integrity": "sha1-35T9jPZTHs915r75oIWPvHK+Ikc=", - "dependencies": { - "no-case": "^2.2.0" - } - }, - "node_modules/parse-asn1": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", - "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", - "dependencies": { - 
"asn1.js": "^5.2.0", - "browserify-aes": "^1.0.0", - "evp_bytestokey": "^1.0.0", - "pbkdf2": "^3.0.3", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascalcase": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-browserify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", - "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==" - }, - "node_modules/path-dirname": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", - "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=" - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" - }, - "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "engines": { - "node": ">=4" - } - }, - "node_modules/path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "node_modules/path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": 
"sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dependencies": { - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/path-type/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/pbkdf2": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz", - "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", - "dependencies": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - }, - "engines": { - "node": ">=0.12" - } - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "node_modules/picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==", - "optional": true, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pinkie": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", - "dependencies": { - "pinkie": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/portfinder": { - "version": "1.0.28", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz", - "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==", - "dependencies": { - "async": "^2.6.2", - "debug": "^3.1.1", - "mkdirp": "^0.5.5" - }, - "engines": { - "node": ">= 0.12.0" - } - }, - "node_modules/portfinder/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/portfinder/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/portfinder/node_modules/ms": { - "version": "2.1.3", - "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/posix-character-classes": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "dependencies": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-calc": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", - "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", - "dependencies": { - "postcss": "^7.0.27", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.0.2" - } - }, - "node_modules/postcss-colormin": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", - "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", - "dependencies": { - "browserslist": "^4.0.0", - "color": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-colormin/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-convert-values": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", - "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-convert-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-discard-comments": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", - "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", - "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-empty": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", - "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", - "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-load-config": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", - "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "import-cwd": "^2.0.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-loader": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-3.0.0.tgz", - "integrity": "sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==", - "dependencies": { - "loader-utils": "^1.1.0", - "postcss": "^7.0.0", - "postcss-load-config": "^2.0.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "4.0.11", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", - "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", - "dependencies": { - "css-color-names": "0.0.4", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "stylehacks": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-longhand/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-merge-rules": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", - "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "cssnano-util-same-parent": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0", - "vendors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-rules/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": 
{ - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", - "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-font-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-gradients": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", - "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "is-color-stop": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-gradients/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-params": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", - "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "browserslist": "^4.0.0", - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "uniqs": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-params/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-selectors": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", - "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-selectors/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz", - "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==", - "dependencies": { - "postcss": "^7.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz", - "integrity": "sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0", - "postcss-value-parser": "^3.3.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-modules-scope": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz", - "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-values": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz", - "integrity": "sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==", - "dependencies": { - "icss-replace-symbols": "^1.1.0", - "postcss": "^7.0.6" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", - "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", - "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-positions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", - "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/postcss-normalize-positions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", - "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-repeat-style/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-string": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", - "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", - "dependencies": { - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-string/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", - "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-timing-functions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-unicode": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", - "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-unicode/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-url": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", - "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", - "dependencies": { - "is-absolute-url": "^2.0.0", - "normalize-url": "^3.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-url/node_modules/normalize-url": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/postcss-normalize-url/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-whitespace": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", - "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-whitespace/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-ordered-values": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", - "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-ordered-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-reduce-initial": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", - "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", - "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms/node_modules/postcss-value-parser": { - "version": 
"3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-safe-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz", - "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==", - "dependencies": { - "postcss": "^7.0.26" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", - "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", - "dependencies": { - "cssesc": "^3.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-svgo": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz", - "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "svgo": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-svgo/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-unique-selectors": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", - "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "postcss": "^7.0.0", - "uniqs": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", - "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==" - }, - "node_modules/postcss/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", - "engines": { - "node": ">=4" - } - }, - "node_modules/prettier": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", - "integrity": 
"sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", - "optional": true, - "bin": { - "prettier": "bin-prettier.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pretty-error": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz", - "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^2.0.4" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/prismjs": { - "version": "1.23.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz", - "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==", - "dependencies": { - "clipboard": "^2.0.0" - }, - "optionalDependencies": { - "clipboard": "^2.0.0" - } - }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "node_modules/promise": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", - "dependencies": { - "asap": "~2.0.3" - } - }, - "node_modules/promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" - }, - "node_modules/proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "dependencies": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/prr": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=" - }, - "node_modules/pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=" - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "node_modules/public-encrypt": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", - "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", - "dependencies": { - "bn.js": "^4.1.0", - "browserify-rsa": "^4.0.0", - "create-hash": "^1.1.0", - "parse-asn1": "^5.0.0", - "randombytes": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - 
"node_modules/public-encrypt/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/pug": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.2.tgz", - "integrity": "sha512-bp0I/hiK1D1vChHh6EfDxtndHji55XP/ZJKwsRqrz6lRia6ZC2OZbdAymlxdVFwd1L70ebrVJw4/eZ79skrIaw==", - "dependencies": { - "pug-code-gen": "^3.0.2", - "pug-filters": "^4.0.0", - "pug-lexer": "^5.0.1", - "pug-linker": "^4.0.0", - "pug-load": "^3.0.0", - "pug-parser": "^6.0.0", - "pug-runtime": "^3.0.1", - "pug-strip-comments": "^2.0.0" - } - }, - "node_modules/pug-attrs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz", - "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==", - "dependencies": { - "constantinople": "^4.0.1", - "js-stringify": "^1.0.2", - "pug-runtime": "^3.0.0" - } - }, - "node_modules/pug-code-gen": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.2.tgz", - "integrity": "sha512-nJMhW16MbiGRiyR4miDTQMRWDgKplnHyeLvioEJYbk1RsPI3FuA3saEP8uwnTb2nTJEKBU90NFVWJBk4OU5qyg==", - "dependencies": { - "constantinople": "^4.0.1", - "doctypes": "^1.1.0", - "js-stringify": "^1.0.2", - "pug-attrs": "^3.0.0", - "pug-error": "^2.0.0", - "pug-runtime": "^3.0.0", - "void-elements": "^3.1.0", - "with": "^7.0.0" - } - }, - "node_modules/pug-error": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.0.0.tgz", - "integrity": "sha512-sjiUsi9M4RAGHktC1drQfCr5C5eriu24Lfbt4s+7SykztEOwVZtbFk1RRq0tzLxcMxMYTBR+zMQaG07J/btayQ==" - }, - "node_modules/pug-filters": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz", - "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==", - "dependencies": { - "constantinople": "^4.0.1", - "jstransformer": "1.0.0", - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0", - "resolve": "^1.15.1" - } - }, - "node_modules/pug-lexer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz", - "integrity": "sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==", - "dependencies": { - "character-parser": "^2.2.0", - "is-expression": "^4.0.0", - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-linker": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz", - "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==", - "dependencies": { - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-load": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz", - "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==", - "dependencies": { - "object-assign": "^4.1.1", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-parser": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz", - "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==", - "dependencies": { - "pug-error": "^2.0.0", - "token-stream": "1.0.0" - } - }, - 
"node_modules/pug-plain-loader": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz", - "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==", - "dependencies": { - "loader-utils": "^1.1.0" - } - }, - "node_modules/pug-runtime": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz", - "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg==" - }, - "node_modules/pug-strip-comments": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz", - "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==", - "dependencies": { - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-walk": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz", - "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ==" - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/pumpify": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", - "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", - "dependencies": { - "duplexify": "^3.6.0", - "inherits": "^2.0.3", - "pump": "^2.0.0" - } - }, - "node_modules/pumpify/node_modules/pump": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", - "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/q": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", - "engines": { - "node": ">=0.6.0", - "teleport": ">=0.2.0" - } - }, - "node_modules/qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "dependencies": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - }, 
- "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/querystring": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz", - "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystring-es3": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/randomfill": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", - "dependencies": { - "randombytes": "^2.0.5", - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "dependencies": { - "bytes": "3.1.0", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/readdirp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", - "integrity": 
"sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", - "dependencies": { - "graceful-fs": "^4.1.11", - "micromatch": "^3.1.10", - "readable-stream": "^2.0.2" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/reduce": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz", - "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==", - "dependencies": { - "object-keys": "^1.1.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "node_modules/regenerate-unicode-properties": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", - "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", - "dependencies": { - "regenerate": "^1.4.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.13.7", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", - "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" - }, - "node_modules/regenerator-transform": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz", - "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regex-not": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "dependencies": { - "extend-shallow": "^3.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/regexpu-core": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz", - "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==", - 
"dependencies": { - "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.2.0", - "regjsgen": "^0.5.1", - "regjsparser": "^0.6.4", - "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/regjsgen": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", - "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==" - }, - "node_modules/regjsparser": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz", - "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remove-trailing-separator": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", - "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" - }, - "node_modules/renderkid": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz", - "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==", - "dependencies": { - "css-select": "^2.0.2", - "dom-converter": "^0.2", - "htmlparser2": "^3.10.1", - "lodash": "^4.17.20", - "strip-ansi": "^3.0.0" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/renderkid/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": 
"sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/renderkid/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/renderkid/node_modules/domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dependencies": { - "domelementtype": "1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", - "dependencies": { - "domelementtype": "^1.3.1", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^3.1.1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/renderkid/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/renderkid/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/repeat-element": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz", - "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": 
"https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/request/node_modules/qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" - }, - "node_modules/resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "dependencies": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, - "node_modules/resolve-cwd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz", - "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-from": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", - "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-url": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=" - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "engines": { - "node": ">=0.12" - } - }, - "node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", - "engines": { - "node": ">= 4" - } - }, - "node_modules/rgb-regex": { 
- "version": "1.0.1", - "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=" - }, - "node_modules/rgba-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=" - }, - "node_modules/rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "node_modules/run-queue": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz", - "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=", - "dependencies": { - "aproba": "^1.1.1" - } - }, - "node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dependencies": { - "ret": "~0.1.10" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" - }, - "node_modules/schema-utils": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - } - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz", - "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=", - "optional": true - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=" - }, - "node_modules/selfsigned": { - "version": "1.10.8", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz", - "integrity": 
"sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==", - "dependencies": { - "node-forge": "^0.10.0" - } - }, - "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", - "dependencies": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.7.2", - "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/send/node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" - }, - "node_modules/serialize-javascript": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", - "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - 
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "node_modules/serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", - "dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" - }, - "node_modules/set-value": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", - "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.3", - "split-string": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/setimmediate": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=" - }, - "node_modules/setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" - }, - "node_modules/sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - }, - "bin": { - "sha.js": "bin.js" - } - }, - "node_modules/shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dependencies": { - "shebang-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": 
"https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "node_modules/sitemap": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz", - "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==", - "dependencies": { - "lodash.chunk": "^4.2.0", - "lodash.padstart": "^4.6.1", - "whatwg-url": "^7.0.0", - "xmlbuilder": "^13.0.0" - }, - "engines": { - "node": ">=6.0.0", - "npm": ">=4.0.0" - } - }, - "node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/smoothscroll-polyfill": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz", - "integrity": "sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg==" - }, - "node_modules/snapdragon": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "dependencies": { - "base": "^0.11.1", - "debug": "^2.2.0", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "map-cache": "^0.2.2", - "source-map": "^0.5.6", - "source-map-resolve": "^0.5.0", - "use": "^3.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", - "dependencies": { - "define-property": "^1.0.0", - "isobject": "^3.0.0", - "snapdragon-util": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/snapdragon-node/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "dependencies": { - "kind-of": "^3.2.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/snapdragon/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sockjs": { - "version": "0.3.21", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz", - "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^3.4.0", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sockjs-client": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz", - "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==", - "dependencies": { - "debug": "^3.2.6", - "eventsource": "^1.0.7", - "faye-websocket": "^0.11.3", - "inherits": "^2.0.4", - "json3": "^3.3.3", - "url-parse": "^1.5.1" - } - }, - "node_modules/sockjs-client/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/sockjs-client/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", - "dependencies": { - "is-plain-obj": "^1.0.0" - }, - "engines": { - "node": 
">=4" - } - }, - "node_modules/source-list-map": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", - "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==" - }, - "node_modules/source-map": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-resolve": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "dependencies": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-url": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz", - "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==" - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/spdy-transport/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy-transport/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/spdy-transport/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/spdy/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/split-string": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "dependencies": { - "extend-shallow": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "node_modules/sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssri": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz", - "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==", - "dependencies": { - "figgy-pudding": "^3.5.1" - } - }, - "node_modules/stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" - }, - "node_modules/stack-utils": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.5.tgz", - "integrity": "sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ==", - "dependencies": { - "escape-string-regexp": "^2.0.0" - }, - "engines": { - "node": 
">=8" - } - }, - "node_modules/stack-utils/node_modules/escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/static-extend": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", - "dependencies": { - "define-property": "^0.2.5", - "object-copy": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/static-extend/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/std-env": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz", - "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==", - "dependencies": { - "ci-info": "^3.0.0" - } - }, - "node_modules/stream-browserify": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", - "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", - "dependencies": { - "inherits": "~2.0.1", - "readable-stream": "^2.0.2" - } - }, - "node_modules/stream-each": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz", - "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", - "dependencies": { - "end-of-stream": "^1.1.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/stream-http": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", - "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", - "dependencies": { - "builtin-status-codes": "^3.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.3.6", - "to-arraybuffer": "^1.0.0", - "xtend": "^4.0.0" - } - }, - "node_modules/stream-shift": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", - "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" - }, - "node_modules/strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/string-width": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string.prototype.trimend": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", - "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/string.prototype.trimstart": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", - "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stylehacks": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", - "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/stylehacks/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - 
"uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/stylus": { - "version": "0.54.8", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", - "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", - "dependencies": { - "css-parse": "~2.0.0", - "debug": "~3.1.0", - "glob": "^7.1.6", - "mkdirp": "~1.0.4", - "safer-buffer": "^2.1.2", - "sax": "~1.2.4", - "semver": "^6.3.0", - "source-map": "^0.7.3" - }, - "bin": { - "stylus": "bin/stylus" - }, - "engines": { - "node": "*" - } - }, - "node_modules/stylus-loader": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-3.0.2.tgz", - "integrity": "sha512-+VomPdZ6a0razP+zinir61yZgpw2NfljeSsdUF5kJuEzlo3khXhY19Fn6l8QQz1GRJGtMCo8nG5C04ePyV7SUA==", - "dependencies": { - "loader-utils": "^1.0.2", - "lodash.clonedeep": "^4.5.0", - "when": "~3.6.x" - } - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/svg-tags": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", - "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=" - }, - "node_modules/svgo": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "dependencies": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - "css-tree": "1.0.0-alpha.37", - "csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/svgo/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/svgo/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/svgo/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/svgo/node_modules/domelementtype": { - "version": "1.3.1", 
- "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/svgo/node_modules/domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/svgo/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/svgo/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/term-size": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", - "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", - "dependencies": { - "commander": "^2.20.0", - "source-map": "~0.6.1", - "source-map-support": "~0.5.12" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "1.4.5", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", - "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", - "dependencies": { - "cacache": "^12.0.2", - "find-cache-dir": "^2.1.0", - "is-wsl": "^1.1.0", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "source-map": "^0.6.1", - "terser": "^4.1.2", - "webpack-sources": "^1.4.0", - "worker-farm": "^1.7.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": 
">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/terser-webpack-plugin/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "node_modules/terser/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - 
"integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" - }, - "node_modules/through2": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", - "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", - "dependencies": { - "readable-stream": "~2.3.6", - "xtend": "~4.0.1" - } - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" - }, - "node_modules/timers-browserify": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", - "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", - "dependencies": { - "setimmediate": "^1.0.4" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" - }, - "node_modules/tiny-cookie": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", - "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" - }, - "node_modules/tiny-emitter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", - "optional": true - }, - "node_modules/to-arraybuffer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", - "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=" - }, - "node_modules/to-factory": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz", - "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE=" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/to-object-path": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-object-path/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "engines": { - "node": ">=6" - } - }, - "node_modules/to-regex": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": 
"sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "dependencies": { - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "regex-not": "^1.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", - "dependencies": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/token-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz", - "integrity": "sha1-zCAOqyYT9BZtJ/+a/HylbUnfbrQ=" - }, - "node_modules/toml": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz", - "integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==" - }, - "node_modules/toposort": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz", - "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk=" - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tr46": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", - "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/tty-browserify": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", - "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=" - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "node_modules/type-fest": { - "version": "0.21.3", - "resolved": 
"https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "engines": { - "node": ">=10" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" - }, - "node_modules/uglify-js": { - "version": "3.4.10", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.10.tgz", - "integrity": "sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==", - "dependencies": { - "commander": "~2.19.0", - "source-map": "~0.6.1" - }, - "bin": { - "uglifyjs": "bin/uglifyjs" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/uglify-js/node_modules/commander": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.19.0.tgz", - "integrity": "sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg==" - }, - "node_modules/uglify-js/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unbox-primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", - "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", - "dependencies": { - "function-bind": "^1.1.1", - "has-bigints": "^1.0.1", - "has-symbols": "^1.0.2", - "which-boxed-primitive": "^1.0.2" - } - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", - "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", - "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^1.0.4", - 
"unicode-property-aliases-ecmascript": "^1.0.4" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", - "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", - "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==", - "engines": { - "node": ">=4" - } - }, - "node_modules/union-value": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", - "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", - "dependencies": { - "arr-union": "^3.1.0", - "get-value": "^2.0.6", - "is-extendable": "^0.1.1", - "set-value": "^2.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" - }, - "node_modules/uniqs": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=" - }, - "node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "dependencies": { - "imurmurhash": "^0.1.4" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/unquote": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=" - }, - "node_modules/unset-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", - "dependencies": { - "has-value": "^0.3.1", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/unset-value/node_modules/has-value": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", - "dependencies": { - "get-value": "^2.0.3", - "has-values": "^0.1.4", - "isobject": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dependencies": { - "isarray": "1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-values": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/upath": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", - "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", - "engines": { - "node": ">=4", - "yarn": "*" - } - }, - "node_modules/update-notifier": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz", - "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==", - "dependencies": { - "boxen": "^4.2.0", - "chalk": "^3.0.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.1", - "is-npm": "^4.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.0.0", - "pupa": "^2.0.1", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/update-notifier/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/update-notifier/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - 
"node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/upper-case": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-1.1.3.tgz", - "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg=" - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/urix": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=" - }, - "node_modules/url": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", - "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dependencies": { - "punycode": "1.3.2", - "querystring": "0.2.0" - } - }, - "node_modules/url-loader": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-1.1.2.tgz", - "integrity": "sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==", - "dependencies": { - "loader-utils": "^1.1.0", - "mime": "^2.0.3", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/url/node_modules/punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" - }, - "node_modules/url/node_modules/querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/use": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/util": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz", - "integrity": 
"sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", - "dependencies": { - "inherits": "2.0.3" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "node_modules/util.promisify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", - "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", - "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.2", - "has-symbols": "^1.0.1", - "object.getownpropertydescriptors": "^2.1.0" - } - }, - "node_modules/util/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/v-runtime-template": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/v-runtime-template/-/v-runtime-template-1.10.0.tgz", - "integrity": "sha512-WLlq9jUepSfUrMEenw3mn7FDXX6hhbl11JjC1OKhwLzifHzVrY5a696TUHDPyj9jke3GGnR7b+2T3od/RL5cww==" - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vendors": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", - "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==" - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/vm-browserify": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" - }, - "node_modules/void-elements": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", - "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz", - "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg==" - }, - "node_modules/vue-hot-reload-api": { - "version": "2.3.4", - "resolved": 
"https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz", - "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" - }, - "node_modules/vue-loader": { - "version": "15.9.6", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz", - "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==", - "dependencies": { - "@vue/component-compiler-utils": "^3.1.0", - "hash-sum": "^1.0.2", - "loader-utils": "^1.1.0", - "vue-hot-reload-api": "^2.3.0", - "vue-style-loader": "^4.1.0" - } - }, - "node_modules/vue-router": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz", - "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw==" - }, - "node_modules/vue-server-renderer": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz", - "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==", - "dependencies": { - "chalk": "^1.1.3", - "hash-sum": "^1.0.2", - "he": "^1.1.0", - "lodash.template": "^4.5.0", - "lodash.uniq": "^4.5.0", - "resolve": "^1.2.0", - "serialize-javascript": "^3.1.0", - "source-map": "0.5.6" - } - }, - "node_modules/vue-server-renderer/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/serialize-javascript": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz", - "integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/vue-server-renderer/node_modules/source-map": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz", - "integrity": "sha1-dc449SvwczxafwwRjYEzSiu19BI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/vue-style-loader": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.3.tgz", - "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==", - "dependencies": { - "hash-sum": "^1.0.2", - "loader-utils": "^1.0.2" - } - }, - "node_modules/vue-template-compiler": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz", - "integrity": 
"sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==", - "dependencies": { - "de-indent": "^1.0.2", - "he": "^1.1.0" - } - }, - "node_modules/vue-template-es2015-compiler": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz", - "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" - }, - "node_modules/vuepress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz", - "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==", - "hasInstallScript": true, - "dependencies": { - "@vuepress/core": "1.8.2", - "@vuepress/theme-default": "1.8.2", - "cac": "^6.5.6", - "envinfo": "^7.2.0", - "opencollective-postinstall": "^2.0.2", - "update-notifier": "^4.0.0" - }, - "bin": { - "vuepress": "cli.js" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/vuepress-html-webpack-plugin": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz", - "integrity": "sha512-BebAEl1BmWlro3+VyDhIOCY6Gef2MCBllEVAP3NUAtMguiyOwo/dClbwJ167WYmcxHJKLl7b0Chr9H7fpn1d0A==", - "dependencies": { - "html-minifier": "^3.2.3", - "loader-utils": "^0.2.16", - "lodash": "^4.17.3", - "pretty-error": "^2.0.2", - "tapable": "^1.0.0", - "toposort": "^1.0.0", - "util.promisify": "1.0.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/big.js": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", - "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==", - "engines": { - "node": "*" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/loader-utils": { - "version": "0.2.17", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz", - "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=", - "dependencies": { - "big.js": "^3.1.3", - "emojis-list": "^2.0.0", - "json5": "^0.5.0", - "object-assign": "^4.0.1" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/util.promisify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", - "dependencies": { - "define-properties": "^1.1.2", - "object.getownpropertydescriptors": "^2.0.3" - } - }, - "node_modules/vuepress-plugin-container": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz", - "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==", - "dependencies": { - "@vuepress/shared-utils": "^1.2.0", - 
"markdown-it-container": "^2.0.0" - } - }, - "node_modules/vuepress-plugin-google-tag-manager": { - "version": "0.0.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz", - "integrity": "sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg==" - }, - "node_modules/vuepress-plugin-sitemap": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz", - "integrity": "sha512-n+8lbukhrKrsI9H/EX0EBgkE1pn85LAQFvQ5dIvrZP4Kz6JxPOPPNTQmZMhahQV1tXbLZQCEN7A1WZH4x+arJQ==", - "dependencies": { - "sitemap": "^3.0.0" - }, - "bin": { - "vuepress-sitemap": "cli.js" - } - }, - "node_modules/vuepress-plugin-smooth-scroll": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz", - "integrity": "sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==", - "dependencies": { - "smoothscroll-polyfill": "^0.4.3" - } - }, - "node_modules/vuepress-theme-cosmos": { - "version": "1.0.182", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz", - "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==", - "dependencies": { - "@cosmos-ui/vue": "^0.35.0", - "@vuepress/plugin-google-analytics": "1.7.1", - "algoliasearch": "^4.2.0", - "axios": "^0.21.0", - "cheerio": "^1.0.0-rc.3", - "clipboard-copy": "^3.1.0", - "entities": "2.1.0", - "esm": "^3.2.25", - "gray-matter": "^4.0.2", - "hotkeys-js": "3.8.1", - "jsonp": "^0.2.1", - "markdown-it": "^12.0.0", - "markdown-it-attrs": "^3.0.3", - "prismjs": "^1.22.0", - "pug": "^3.0.1", - "pug-plain-loader": "^1.0.0", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "tiny-cookie": "^2.3.2", - "v-runtime-template": "^1.10.0", - "vuepress": "^1.5.4", - "vuepress-plugin-google-tag-manager": "0.0.5", - "vuepress-plugin-sitemap": "^2.3.1" - } - }, - "node_modules/watchpack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.2.0.tgz", - "integrity": "sha512-up4YAn/XHgZHIxFBVCdlMiWDj6WaLKpwVeGQk2I5thdYxF/KmF0aaz6TfJZ/hfl1h/XlcDr7k1KH7ThDagpFaA==", - "dev": true, - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/watchpack-chokidar2": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz", - "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==", - "optional": true, - "dependencies": { - "chokidar": "^2.1.8" - } - }, - "node_modules/watchpack/node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", - "dev": true - }, - "node_modules/wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/webidl-conversions": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", - "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" - }, - "node_modules/webpack": { - "version": "4.46.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.46.0.tgz", - "integrity": "sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/wasm-edit": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "acorn": "^6.4.1", - "ajv": "^6.10.2", - "ajv-keywords": "^3.4.1", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^4.5.0", - "eslint-scope": "^4.0.3", - "json-parse-better-errors": "^1.0.2", - "loader-runner": "^2.4.0", - "loader-utils": "^1.2.3", - "memory-fs": "^0.4.1", - "micromatch": "^3.1.10", - "mkdirp": "^0.5.3", - "neo-async": "^2.6.1", - "node-libs-browser": "^2.2.1", - "schema-utils": "^1.0.0", - "tapable": "^1.1.3", - "terser-webpack-plugin": "^1.4.3", - "watchpack": "^1.7.4", - "webpack-sources": "^1.4.1" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/webpack-chain": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz", - "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-chain/node_modules/javascript-stringify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.0.1.tgz", - "integrity": "sha512-yV+gqbd5vaOYjqlbk16EG89xB5udgjqQF3C5FAORDg4f/IS1Yc5ERCv5e/57yBcfJYw05V5JyIXabhwb75Xxow==" - }, - "node_modules/webpack-dev-middleware": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.7.3.tgz", - "integrity": "sha512-djelc/zGiz9nZj/U7PTBi2ViorGJXEWo/3ltkPbDyxCXhhEXkW0ce99falaok4TPj+AsxLiXJR0EBOb0zh9fKQ==", - "dependencies": { - "memory-fs": "^0.4.1", - "mime": "^2.4.4", - "mkdirp": "^0.5.1", - "range-parser": "^1.2.1", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack-dev-server": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz", - "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==", - "dependencies": { - "ansi-html": "0.0.7", - "bonjour": "^3.5.0", - "chokidar": "^2.1.8", - "compression": "^1.7.4", - "connect-history-api-fallback": "^1.6.0", - "debug": "^4.1.1", - "del": "^4.1.1", - "express": "^4.17.1", - "html-entities": "^1.3.1", - "http-proxy-middleware": "0.19.1", - "import-local": "^2.0.0", - "internal-ip": "^4.3.0", - "ip": "^1.1.5", - "is-absolute-url": "^3.0.3", - "killable": "^1.0.1", - "loglevel": "^1.6.8", - "opn": "^5.5.0", - "p-retry": "^3.0.1", - "portfinder": "^1.0.26", - 
"schema-utils": "^1.0.0", - "selfsigned": "^1.10.8", - "semver": "^6.3.0", - "serve-index": "^1.9.1", - "sockjs": "^0.3.21", - "sockjs-client": "^1.5.0", - "spdy": "^4.0.2", - "strip-ansi": "^3.0.1", - "supports-color": "^6.1.0", - "url": "^0.11.0", - "webpack-dev-middleware": "^3.7.2", - "webpack-log": "^2.0.0", - "ws": "^6.2.1", - "yargs": "^13.3.2" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 6.11.5" - } - }, - "node_modules/webpack-dev-server/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/webpack-dev-server/node_modules/is-absolute-url": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", - "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-dev-server/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack-dev-server/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/webpack-log": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-2.0.0.tgz", - "integrity": "sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==", - "dependencies": { - "ansi-colors": "^3.0.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-merge": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz", - "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==", - "dependencies": { - "lodash": "^4.17.15" - } - }, - "node_modules/webpack-sources": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz", - "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", - "dependencies": { - "source-list-map": "^2.0.0", - "source-map": "~0.6.1" - } - }, - "node_modules/webpack-sources/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/webpack/node_modules/acorn": { - "version": "6.4.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", - "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/webpack/node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "optional": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/webpack/node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "optional": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", - "optional": true, - "dependencies": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" - }, - "engines": { - "node": ">= 8.10.0" - } - }, - "node_modules/webpack/node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "optional": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/webpack/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "optional": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack/node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "optional": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/webpack/node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "optional": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/webpack/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack/node_modules/readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", - "optional": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack/node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "optional": true, - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/webpack/node_modules/watchpack": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz", - "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==", - "dependencies": { - "chokidar": "^3.4.1", - "graceful-fs": "^4.1.2", - "neo-async": "^2.5.0", - "watchpack-chokidar2": "^2.0.1" - }, - "optionalDependencies": { - "chokidar": "^3.4.1", - "watchpack-chokidar2": "^2.0.1" - } - }, - "node_modules/webpackbar": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-3.2.0.tgz", - "integrity": "sha512-PC4o+1c8gWWileUfwabe0gqptlXUDJd5E0zbpr2xHP1VSOVlZVPBZ8j6NCR8zM5zbKdxPhctHXahgpNK1qFDPw==", - "dependencies": { - "ansi-escapes": "^4.1.0", - "chalk": "^2.4.1", - "consola": "^2.6.0", - "figures": "^3.0.0", - "pretty-time": "^1.1.0", - "std-env": "^2.2.1", - "text-table": "^0.2.0", - "wrap-ansi": "^5.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - 
"engines": { - "node": ">=0.8.0" - } - }, - "node_modules/whatwg-url": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", - "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", - "dependencies": { - "lodash.sortby": "^4.7.0", - "tr46": "^1.0.1", - "webidl-conversions": "^4.0.2" - } - }, - "node_modules/when": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/when/-/when-3.6.4.tgz", - "integrity": "sha1-RztRfsFZ4rhQBUl6E5g/CVQS404=" - }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "node_modules/which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, - "node_modules/widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/widest-line/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - 
"engines": { - "node": ">=8" - } - }, - "node_modules/with": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz", - "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==", - "dependencies": { - "@babel/parser": "^7.9.6", - "@babel/types": "^7.9.6", - "assert-never": "^1.2.1", - "babel-walk": "3.0.0-canary-5" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/worker-farm": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz", - "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", - "dependencies": { - "errno": "~0.1.7" - } - }, - "node_modules/wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", - "dependencies": { - "async-limiter": "~1.0.0" - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/xmlbuilder": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz", - "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "engines": { - "node": ">=0.4" - } - }, - "node_modules/y18n": { - 
"version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" - }, - "node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - } - }, - "node_modules/yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "node_modules/yargs-parser/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/zepto": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz", - "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g=" - } - }, "dependencies": { "@algolia/cache-browser-local-storage": { "version": "4.8.6", @@ -15492,17 +2959,6 @@ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==" }, - "clipboard": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz", - "integrity": 
"sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==", - "optional": true, - "requires": { - "good-listener": "^1.2.2", - "select": "^1.1.2", - "tiny-emitter": "^2.0.0" - } - }, "clipboard-copy": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", @@ -16408,12 +3864,6 @@ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" }, - "delegate": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz", - "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==", - "optional": true - }, "depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", @@ -17396,15 +4846,6 @@ "slash": "^2.0.0" } }, - "good-listener": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=", - "optional": true, - "requires": { - "delegate": "^3.1.2" - } - }, "got": { "version": "9.6.0", "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", @@ -19395,9 +6836,9 @@ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "path-to-regexp": { "version": "0.1.7", @@ -19436,6 +6877,11 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + }, "picomatch": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", @@ -19507,27 +6953,18 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" + "picocolors": "^0.2.1", + "source-map": "^0.6.1" }, "dependencies": { "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } } } }, @@ -20077,12 +7514,9 @@ "integrity": 
"sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.23.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz", - "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==", - "requires": { - "clipboard": "^2.0.0" - } + "version": "1.25.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.25.0.tgz", + "integrity": "sha512-WCjJHl1KEWbnkQom1+SzftbtXMKQoezOCYs5rECqMN+jP+apI7ftoflyqigqzopSO3hMhTEb0mFClA8lkolgEg==" }, "process": { "version": "0.11.10", @@ -20841,12 +8275,6 @@ "kind-of": "^6.0.0" } }, - "select": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz", - "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=", - "optional": true - }, "select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", @@ -21477,14 +8905,6 @@ "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - }, "string-width": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", @@ -21528,6 +8948,14 @@ "define-properties": "^1.1.3" } }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, "strip-ansi": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", @@ -21864,12 +9292,6 @@ "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" }, - "tiny-emitter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", - "optional": true - }, "to-arraybuffer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", @@ -22318,9 +9740,9 @@ } }, "url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz", + "integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -23173,9 +10595,9 @@ } }, "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": 
"sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", "requires": { "async-limiter": "~1.0.0" } diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 3af5a33e8..a66969042 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -42,5 +42,7 @@ sections. - [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md) - [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md) - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md) +- [RFC-005: Event System](./rfc-005-event-system.rst) +- [RFC-006: Event Subscription](./rfc-006-event-subscription.md) diff --git a/docs/rfc/rfc-000-p2p-roadmap.rst b/docs/rfc/rfc-000-p2p-roadmap.rst index 64dda773e..dc9b54c7f 100644 --- a/docs/rfc/rfc-000-p2p-roadmap.rst +++ b/docs/rfc/rfc-000-p2p-roadmap.rst @@ -40,15 +40,15 @@ Critique of Current Peer-to-Peer Infrastructure The current (refactored) P2P stack is an improvement on the previous iteration (legacy), but as of 0.35, there remains room for improvement in the design and -implementation of the P2P layer. +implementation of the P2P layer. Some limitations of the current stack include: - heavy reliance on buffering to avoid backups in the flow of components, which is fragile to maintain and can lead to unexpected memory usage patterns and forces the routing layer to make decisions about when messages - should be discarded. - + should be discarded. + - the current p2p stack relies on convention (rather than the compiler) to enforce the API boundaries and conventions between reactors and the router, making it very easy to write "wrong" reactor code or introduce a bad @@ -64,7 +64,7 @@ Some limitations of the current stack include: difficult to expose that information to monitoring/observability tools. This general opacity also makes it difficult to interact with the peer system from other areas of the code base (e.g. tests, reactors). - + - the legacy stack provided some control to operators to force the system to dial new peers or seed nodes or manipulate the topology of the system _in situ_. The current stack can't easily provide this, and while the new stack @@ -94,16 +94,16 @@ blocksync and consensus) from procedure calls to message passing. This is a relatively simple change and could be implemented with the following components: -- a constant to represent "local" delivery as the ``To``` field on +- a constant to represent "local" delivery as the ``To`` field on ``p2p.Envelope``. - + - special path for routing local messages that doesn't require message serialization (protobuf marshalling/unmarshaling). - + Adding these semantics, particularly if in conjunction with synchronous semantics provides a solution to dependency graph problems currently present in the Tendermint codebase, which will simplify development, make it possible -to isolate components for testing. +to isolate components for testing. Eventually, this will also make it possible to have a logical Tendermint node running in multiple processes or in a collection of containers, although the @@ -129,7 +129,7 @@ of request/response ID to allow identifying out-of-order responses over a single connection. Additionally, expanded the programming model of the ``p2p.Channel`` to accommodate some kind of _future_ or similar paradigm to make it viable to write reactor code without needing for the reactor developer -to wrestle with lower level concurency constructs. +to wrestle with lower level concurrency constructs. 
Timeout Handling (QoS) @@ -143,7 +143,7 @@ detect or attribute. Additionally, the current system provides three main parameters to control quality of service: - buffer sizes for channels and queues. - + - priorities for channels - queue implementation details for shedding load. @@ -151,13 +151,13 @@ parameters to control quality of service: These end up being quite coarse controls, and changing the settings are difficult because as the queues and channels are able to buffer large numbers of messages it can be hard to see the impact of a given change, particularly -in our extant test environment. In general, we should endeavor to: +in our extant test environment. In general, we should endeavor to: - set real timeouts, via contexts, on most message send operations, so that senders rather than queues can be responsible for timeout logic. Additionally, this will make it possible to avoid sending messages during shutdown. - + - reduce (to the greatest extent possible) the amount of buffering in channels and the queues, to more readily surface backpressure and reduce the potential for buildup of stale messages. @@ -173,8 +173,8 @@ transport types and makes it more likely that message-based caching and rate limiting will be implemented at the transport layer rather than at a more appropriate level. -The transport then, would be responsible for negitating the connection and the -handshake and otherwise behave like a socket/file discriptor with ``Read` and +The transport then, would be responsible for negotiating the connection and the +handshake and otherwise behave like a socket/file descriptor with ``Read`` and ``Write`` methods. While this was included in the initial design for the new P2P layer, it may be @@ -185,7 +185,7 @@ Service Discovery ~~~~~~~~~~~~~~~~~ In the current system, Tendermint assumes that all nodes in a network are -largely equivelent, and nodes tend to be "chatty" making many requests of +largely equivalent, and nodes tend to be "chatty" making many requests of large numbers of peers and waiting for peers to (hopefully) respond. While this works and has allowed Tendermint to get to a certain point, this both produces a theoretical scaling bottle neck and makes it harder to test and @@ -194,7 +194,7 @@ verify components of the system. In addition to peer's identity and connection information, peers should be able to advertise a number of services or capabilities, and node operators or developers should be able to specify peer capability requirements (e.g. target -at least -percent of peers with capability.) +at least -percent of peers with capability.) These capabilities may be useful in selecting peers to send messages to, it may make sense to extend Tendermint's message addressing capability to allow @@ -215,7 +215,7 @@ Continued Homegrown Implementation The current peer system is homegrown and is conceptually compatible with the needs of the project, and while there are limitations to the system, the p2p layer is not (currently as of 0.35) a major source of bugs or friction during -development. +development. However, the current implementation makes a number of allowances for interoperability, and there are a collection of iterative improvements that @@ -228,18 +228,18 @@ implementation, upcoming work would include: connections using different protocols (e.g. QUIC, etc.) - entirely remove the constructs and implementations of the legacy peer - implementation. - + implementation. + - establish and enforce clearer chains of responsibility for connection establishment (e.g. 
handshaking, setup,) which is currently shared between - three components. + three components. - report better metrics regarding the into the state of peers and network connectivity, which are opaque outside of the system. This is constrained at the moment as a side effect of the split responsibility for connection establishment. - -- extend the PEX system to include service information so that ndoes in the + +- extend the PEX system to include service information so that nodes in the network weren't necessarily homogeneous. While maintaining a bespoke peer management layer would seem to distract from @@ -272,20 +272,20 @@ case that our internal systems need to know much less about peers than otherwise specified. Similarly, the current system has a notion of peer scoring that cannot be communicated to libp2p, which may be fine as this is only used to support peer exchange (PEX,) which would become a property libp2p -and not expressed in it's current higher-level form. +and not expressed in its current higher-level form. -In general, the effort to switch to libp2p would involve: +In general, the effort to switch to libp2p would involve: - timing it during an appropriate protocol-breaking window, as it doesn't seem - viable to support both libp2p *and* the current p2p protocol. - + viable to support both libp2p *and* the current p2p protocol. + - providing some in-memory testing network to support the use case that the current ``p2p.MemoryNetwork`` provides. - re-homing the ``p2p.Router`` implementation on top of libp2p components to be able to maintain the current reactor implementations. - -Open question include: + +Open questions include: - how much local buffering should we be doing? It sort of seems like we should figure out what the expected behavior is for libp2p for QoS-type @@ -302,7 +302,7 @@ Open question include: - how do efforts to select "the best" (healthy, close, well-behaving, etc.) peers work out if Tendermint is not maintaining a local peer database? - + - would adding additional higher level semantics (internal message passing, request/response pairs, service discovery, etc.) facilitate removing some of the direct linkages between constructs/components in the system and reduce @@ -311,6 +311,6 @@ References ---------- -- `Tracking Ticket for P2P Refactor Project `_ +- `Tracking Ticket for P2P Refactor Project `_ - `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_ - `ADR 62: P2P Architecture and Abstraction <../architecture/adr-061-p2p-architecture.md>`_ diff --git a/docs/rfc/rfc-005-event-system.rst b/docs/rfc/rfc-005-event-system.rst new file mode 100644 index 000000000..b4a00b43d --- /dev/null +++ b/docs/rfc/rfc-005-event-system.rst @@ -0,0 +1,122 @@ +===================== +RFC 005: Event System +===================== + +Changelog +--------- + +- 2021-09-17: Initial Draft (@tychoish) + +Abstract +-------- + +The event system within Tendermint, which supports a lot of core +functionality, also represents a major infrastructural liability. As part of +our upcoming review of the RPC interfaces and our ongoing thoughts about +stability and performance, as well as the preparation for Tendermint 1.0, we +should revisit the design and implementation of the event system. This +document discusses both the current state of the system and potential +directions for future improvement.
+ +Background +---------- + +Current State of Events +~~~~~~~~~~~~~~~~~~~~~~~ + +The event system makes it possible for clients, both internal and external, +to receive notifications of state replication events, such as new blocks, +new transactions, validator set changes, as well as intermediate events during +consensus. Because the event system is very cross cutting, the behavior and +performance of the event publication and subscription system has huge impacts +for all of Tendermint. + +The subscription service is exposed over the RPC interface, but also powers +the indexing (e.g. to an external database) and is the mechanism by which +`BroadcastTxCommit` is able to wait for transactions to land in a block. + +The current pubsub mechanism relies on a couple of buffered channels, +primarily between all event creators and subscribers, but also for each +subscription. The result of this design is that, in some situations with the +right collection of slow subscription consumers, the event system can put +backpressure on the consensus state machine and message gossiping in the +network, thereby causing nodes to lag. + +Improvements +~~~~~~~~~~~~ + +The current system relies on implicit, bounded queues built by the buffered channels, +and though thread-safe, can force all activity within Tendermint to serialize, +which does not need to happen. Additionally, timeouts for subscription +consumers, related to the implementation of the RPC layer, may complicate the +use of the system. + +References +~~~~~~~~~~ + +- Legacy Implementation + - `publication of events `_ + - `send operation `_ + - `send loop `_ +- Related RFCs + - `RFC 002: IPC Ecosystem <./rfc-002-ipc-ecosystem.md>`_ + - `RFC 003: Performance Questions <./rfc-003-performance-questions.md>`_ + +Discussion +---------- + +Changes to Published Events +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As part of this process, the Tendermint team should do a study of the existing +event types and ensure that there are viable production use cases for +subscriptions to all event types. Instinctively it seems plausible that some +of the events may not be usable outside of Tendermint (e.g. ``TimeoutWait`` +or ``NewRoundStep``), and it might make sense to remove them. Certainly, it +would be good to make sure that we don't maintain infrastructure for unused or +un-useful messages indefinitely. + +Blocking Subscription +~~~~~~~~~~~~~~~~~~~~~ + +The blocking subscription mechanism makes it possible to have *send* +operations into the subscription channel be un-buffered (the event processing +channel is still buffered). In the blocking case, events from one subscription +can block processing that event for other non-blocking subscriptions. The main +case, it seems, for blocking subscriptions is ensuring that a transaction has +been committed to a block for ``BroadcastTxCommit``. Removing blocking +subscriptions entirely, and potentially finding another way to implement +``BroadcastTxCommit``, could lead to important simplifications and +improvements to throughput without requiring large changes. + +Subscription Identification +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Before `#6386 `_, all +subscriptions were identified by the combination of a client ID and a query, +and with that change, it became possible to identify any subscription given +only an ID, but compatibility with the legacy identification means that there's a +good deal of legacy code as well as client side efficiency that could be +improved.
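To make the backpressure concern above concrete, here is a minimal Go sketch (an illustration only, not the actual pubsub implementation) of how fan-out over buffered channels stalls the publisher as soon as one slow subscriber's buffer fills:

.. code-block:: go

    package pubsubsketch

    // Bus fans events out to subscriber channels. The buffers hide a slow
    // consumer only until they fill; after that, Publish blocks on the
    // slowest subscriber, which is how a lagging consumer can put
    // backpressure on whatever is publishing (e.g. consensus).
    type Bus struct {
        subs []chan string
    }

    // Subscribe registers a new subscriber with a buffer of size buf.
    func (b *Bus) Subscribe(buf int) <-chan string {
        ch := make(chan string, buf)
        b.subs = append(b.subs, ch)
        return ch
    }

    // Publish delivers event to every subscriber, in order.
    func (b *Bus) Publish(event string) {
        for _, ch := range b.subs {
            ch <- event // blocks once ch's buffer is full
        }
    }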
+ +Pubsub Changes +~~~~~~~~~~~~~~ + +The pubsub core should be implemented in a way that removes the possibility that +backpressure from the event system impacts the core system, *or* that one +subscription impacts the behavior of another area of the +system. Additionally, because the current system is implemented entirely in +terms of a collection of buffered channels, the event system (and large +numbers of subscriptions) can be a source of memory pressure. + +These changes could include: + +- explicit cancellation and timeouts promulgated from callers (e.g. RPC + endpoints, etc.); this should be done using contexts. + +- the subscription system should be able to spill to disk to avoid putting memory + pressure on the core behavior of the node (consensus, gossip). + +- subscriptions implemented as cursors rather than channels, with either + condition variables to simulate the existing "push" API or a client side + iterator API with some kind of long polling-type interface. diff --git a/docs/rfc/rfc-006-event-subscription.md b/docs/rfc/rfc-006-event-subscription.md new file mode 100644 index 000000000..4372f8d28 --- /dev/null +++ b/docs/rfc/rfc-006-event-subscription.md @@ -0,0 +1,204 @@ +# RFC 006: Event Subscription + +## Changelog + +- 30-Oct-2021: Initial draft (@creachadair) + +## Abstract + +The Tendermint consensus node allows clients to subscribe to its event stream +via methods on its RPC service. The ability to view the event stream is +valuable for clients, but the current implementation has some deficiencies that +make it difficult for some clients to use effectively. This RFC documents these +issues and discusses possible approaches to solving them. + + +## Background + +A running Tendermint consensus node exports a [JSON-RPC service][rpc-service] +that provides a [large set of methods][rpc-methods] for inspecting and +interacting with the node. One important cluster of these methods is the +`subscribe`, `unsubscribe`, and `unsubscribe_all` methods, which permit clients +to subscribe to a filtered stream of the [events generated by the node][events] +as it runs. + +Unlike the other methods of the service, the methods in the "event +subscription" cluster are not accessible via [ordinary HTTP GET or POST +requests][rpc-transport], but require upgrading the HTTP connection to a +[websocket][ws]. This is necessary because the `subscribe` request needs a +persistent channel to deliver results back to the client, and an ordinary HTTP +connection does not reliably persist across multiple requests. Since these +methods do not work properly without a persistent channel, they are _only_ +exported via a websocket connection, and are not routed for plain HTTP. + + +## Discussion + +There are some operational problems with the current implementation of event +subscription in the RPC service: + +- **Event delivery is not valid JSON-RPC.** When a client issues a `subscribe` + request, the server replies (correctly) with an initial empty acknowledgement + (`{}`). After that, each matching event is delivered "unsolicited" (without + another request from the client), as a separate [response object][json-response] + with the same ID as the initial request. + + This matters because it means a standard JSON-RPC client library can't + interact correctly with the event subscription mechanism.
+ + Even for clients that can handle unsolicited values pushed by the server, + these responses are invalid: They have an ID, so they cannot be treated as + [notifications][json-notify]; but the ID corresponds to a request that was + already completed. In practice, this means that general-purpose JSON-RPC + libraries cannot use this method correctly -- it requires a custom client. + + The Go RPC client from the Tendermint core can support this case, but clients + in other languages have no easy solution. + + This is the cause of issue [#2949][issue2949]. + +- **Subscriptions are terminated by disconnection.** When the connection to the + client is interrupted, the subscription is silently dropped. + + This is a reasonable behavior, but it matters because a client whose + subscription is dropped gets no useful error feedback, just a closed + connection. Should they try again? Is the node overloaded? Was the client + too slow? Did the caller forget to respond to pings? Debugging these kinds + of failures is unnecessarily painful. + + Websockets compound this, because websocket connections time out if no + traffic is seen for a while, and keeping them alive requires active + cooperation between the client and server. With a plain TCP socket, liveness + is handled transparently by the keepalive mechanism. On a websocket, + however, one side has to occasionally send a PING (if the connection is + otherwise idle). The other side must return a matching PONG in time, or the + connection is dropped. Apart from being tedious, this is highly susceptible + to CPU load. + + The Tendermint Go implementation automatically sends and responds to pings. + Clients in other languages (or not wanting to use the Tendermint libraries) + need to handle it explicitly. This burdens the client for no practical + benefit: A subscriber has no information about when matching events may be + available, so it shouldn't have to participate in keeping the connection + alive. + +- **Mismatched load profiles.** Most of the RPC service is mainly important for + low-volume local use, either by the application the node serves (e.g., the + ABCI methods) or by the node operator (e.g., the info methods). Event + subscription is important for remote clients, and may represent a much higher + volume of traffic. + + This matters because both are using the same JSON-RPC mechanism. For + low-volume local use, the ergonomics of JSON-RPC are a good fit: It's easy to + issue queries from the command line (e.g., using `curl`) or to write scripts + that call the RPC methods to monitor the running node. + + For high-volume remote use, JSON-RPC is not such a good fit: Even leaving + aside the non-standard delivery protocol mentioned above, the time and memory + cost of encoding event data matters for the stability of the node when there + can be potentially hundreds of subscribers. Moreover, a subscription is + long-lived compared to most RPC methods, in that it may persist as long as the + node is active. + +- **Mismatched security profiles.** The RPC service exports several methods + that should not be open to arbitrary remote callers, both for correctness + reasons (e.g., `remove_tx` and `broadcast_tx_*`) and for operational + stability reasons (e.g., `tx_search`). A node may still need to expose + events, however, to support UI tools. + + This matters because all the methods share the same network endpoint.
While + it is possible to block the top-level GET and POST handlers with a proxy, + exposing the `/websocket` handler exposes not _only_ the event subscription + methods, but the rest of the service as well. + +### Possible Improvements + +There are several things we could do to improve the experience of developers +who need to subscribe to events from the consensus node. These are not all +mutually exclusive. + +1. **Split event subscription into a separate service**. Instead of exposing + event subscription on the same endpoint as the rest of the RPC service, + dedicate a separate endpoint on the node for _only_ event subscription. The + rest of the RPC services (_sans_ events) would remain as-is. + + This would make it easy to disable or firewall outside access to sensitive + RPC methods, without blocking access to event subscription (and vice versa). + This is probably worth doing, even if we don't take any of the other steps + described here. + +2. **Use a different protocol for event subscription.** There are various ways + we could approach this, depending on how much we're willing to shake up the + current API. Here are sketches of a few options: + + - Keep the websocket, but rework the API to be more JSON-RPC compliant, + perhaps by converting event delivery into notifications. This is less + up-front change for existing clients, but retains all of the existing + implementation complexity, and doesn't contribute much toward more serious + performance and UX improvements later. + + - Switch from websocket to plain HTTP, and rework the subscription API to + use a more conventional request/response pattern instead of streaming. + This is a little more up-front work for existing clients, but leverages + better library support for clients not written in Go. + + The protocol would become more chatty, but we could mitigate that with + batching, and in return we would get more control over what to do about + slow clients: Instead of simply silently dropping them, as we do now, we + could drop messages and signal the client that they missed some data ("M + dropped messages since your last poll"). + + This option is probably the best balance between work, API change, and + benefit, and has a nice incidental effect that it would be easier to debug + subscriptions from the command-line, like the other RPC methods. + + - Switch to gRPC: Preserves a persistent connection and gives us a more + efficient binary wire format (protobuf), at the cost of much more work for + clients and harder debugging. This may be the best option if performance + and server load are our top concerns. + + Given that we are currently using JSON-RPC, however, I'm not convinced the + costs of encoding and sending messages on the event subscription channel + are the limiting factor on subscription efficiency. + +3. **Delegate event subscriptions to a proxy.** Give responsibility for + managing event subscription to a proxy that runs separately from the node, + and switch the node to push events to the proxy (like a webhook) instead of + serving subscribers directly. This is more work for the operator (another + process to configure and run) but may scale better for big networks. + + I mention this option for completeness, but making this change would be a + fairly substantial project. If we want to consider shifting responsibility + for event subscription outside the node anyway, we should probably be more + systematic about it. For a more principled approach, see point (4) below. + +4. 
**Move event subscription downstream of indexing.** We are already planning + to give applications more control over event indexing. By extension, we + might allow the application to also control how events are filtered, + queried, and subscribed. Having the application control these concerns, + rather than the node, might make life easier for developers building UI and + tools for that application. + + This is a much larger change, so I don't think it is likely to be practical + in the near-term, but it's worth considering as a broader option. Some of + the existing code for filtering and selection could be made more reusable, + so applications would not need to reinvent everything. + + +## References + +- [Tendermint RPC service][rpc-service] +- [Tendermint RPC routes][rpc-methods] +- [Discussion of the event system][events] +- [Discussion about RPC transport options][rpc-transport] (from RFC 002) +- [RFC 6455: The websocket protocol][ws] +- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification) + +[rpc-service]: https://docs.tendermint.com/master/rpc/ +[rpc-methods]: https://github.com/tendermint/tendermint/blob/master/internal/rpc/core/routes.go#L12 +[events]: ./rfc-005-event-system.rst +[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport +[ws]: https://datatracker.ietf.org/doc/html/rfc6455 +[json-response]: https://www.jsonrpc.org/specification#response_object +[json-notify]: https://www.jsonrpc.org/specification#notification +[issue2949]: https://github.com/tendermint/tendermint/issues/2949 diff --git a/docs/roadmap/README.md b/docs/roadmap/README.md new file mode 100644 index 000000000..bd9280c45 --- /dev/null +++ b/docs/roadmap/README.md @@ -0,0 +1,6 @@ +--- +order: false +parent: + title: Roadmap + order: 7 +--- \ No newline at end of file diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md new file mode 100644 index 000000000..19d9d89fb --- /dev/null +++ b/docs/roadmap/roadmap.md @@ -0,0 +1,97 @@ +--- +order: 1 +--- + +# Tendermint Roadmap + +*Last Updated: Friday 8 October 2021* + +This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams. + +Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). + +This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status. + +The upgrades are split into two components: **Epics**, the features that define a release and in large part dictate the timing of releases; and **minors**, features of smaller scale and lower priority that could land in neighboring releases. + +## V0.35 (completed Q3 2021) + +### Prioritized Mempool + +Transactions were previously added to blocks in the order in which they arrived at the mempool. Adding a priority field via `CheckTx` gives applications more control over which transactions make it into a block.
This is important in the presence of transaction fees. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md) + +### Refactor of the P2P Framework + +The Tendermint P2P system is undergoing a large redesign to improve its performance and reliability. The first phase of this redesign is included in 0.35. This phase cleans and decouples abstractions, improves peer lifecycle management, peer address handling and enables pluggable transports. It is implemented to be protocol-compatible with the previous implementation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-062-p2p-architecture.md) + +### State Sync Improvements + +Following the initial version of state sync, several improvements have been made. These include the addition of [Reverse Sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-068-reverse-sync.md) needed for evidence handling, the introduction of a [P2P State Provider](https://github.com/tendermint/tendermint/pull/6807) as an alternative to RPC endpoints, new configuration parameters to adjust throughput, and several bug fixes. + +### Custom event indexing + PSQL Indexer + +Added a new `EventSink` interface to allow alternatives to Tendermint's proprietary transaction indexer. We also added a PostgreSQL Indexer implementation, allowing rich SQL-based index queries. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md) + +### Minor Works + +- Several Go packages were reorganized to make the distinction between public APIs and implementation details more clear. +- Block indexer to index begin-block and end-block events. [More](https://github.com/tendermint/tendermint/pull/6226) +- Block, state, evidence, and light storage keys were reworked to preserve lexicographic order. This change requires a database migration. [More](https://github.com/tendermint/tendermint/pull/5771) +- Introduction of Tendermint modes. Part of this change includes the ability to run a separate seed node that runs the PEX reactor only. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) + +## V0.36 (expected Q1 2022) + +### ABCI++ + +An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md) + +### Proposer-Based Timestamps + +Proposer-based timestamps are a replacement for [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation.
[More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md) + +### Soft Upgrades + +We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222) + +### Minor Works + +- Remove the "legacy" P2P framework, and clean up of P2P package. [More](https://github.com/tendermint/tendermint/issues/5670) +- Remove the global mutex from the local ABCI client to enable application-controlled concurrency. [More](https://github.com/tendermint/tendermint/issues/7073) +- Enable P2P support for light clients +- Node orchestration of services + Node initialization and composability +- Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer. +- Improve node visibility by introducing more metrics + +## V0.37 (expected Q3 2022) + +### Complete P2P Refactor + +Finish the final phase of the P2P system. Ongoing research and planning is taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC) and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/), as well as research into more advanced gossiping techniques. + +### Streamline Storage Engine + +Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897) + +### Evaluate Interprocess Communication + +Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, events as examples). Many of these have multiple implementations where a single one would suffice. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md) + +### Minor Works + +- Amnesia attack handling. [More](https://github.com/tendermint/tendermint/issues/5270) +- Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397) +- Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319) +- Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446) + +## V1.0 (expected Q4 2022) + +Has the same feature set as V0.37 but with a focus on testing, protocol correctness and minor tweaks to ensure a stable product. Such work might include extending the [consensus testing framework](https://github.com/tendermint/tendermint/issues/5920), the use of canary/long-lived testnets and greater integration tests. + +## Post 1.0 Work + +- Improved block propagation with erasure coding and/or compact blocks.
[More](https://github.com/tendermint/spec/issues/347) +- Consensus engine refactor +- Bidirectional ABCI +- Randomized Leader Election +- ZK proofs / other cryptographic primitives +- Multichain Tendermint diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index fb359a08b..0de5ed908 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: System + title: Understanding Tendermint order: 5 --- @@ -10,11 +10,15 @@ parent: This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) -- [Running in Production](./running-in-production.md) - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) -- [Block Sync](./block-sync.md) -- [State Sync](./state-sync.md) -- [Mempool](./mempool.md) +- [Block Sync](./block-sync/README.md) +- [State Sync](./state-sync/README.md) +- [Mempool](./mempool/README.md) - [Light Client](./light-client.md) +- [Consensus](./consensus/README.md) +- [Peer Exchange (PEX)](./pex/README.md) +- [Evidence](./evidence/README.md) + +For full specifications refer to the [spec repo](https://github.com/tendermint/spec). diff --git a/docs/tendermint-core/block-sync.md b/docs/tendermint-core/block-sync/README.md similarity index 93% rename from docs/tendermint-core/block-sync.md rename to docs/tendermint-core/block-sync/README.md index 43e849fcc..3ffb0953d 100644 --- a/docs/tendermint-core/block-sync.md +++ b/docs/tendermint-core/block-sync/README.md @@ -1,7 +1,11 @@ --- -order: 10 +order: 1 +parent: + title: Block Sync + order: 6 --- + # Block Sync *Formerly known as Fast Sync* @@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height` The user can query the events by subscribing to `EventQueryBlockSyncStatus`. Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
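As a rough illustration, a Go client could watch these status events over the websocket RPC along the following lines (a sketch only: the address, subscriber name, and the `tm.event = 'BlockSyncStatus'` query string are assumptions for illustration and may differ across Tendermint versions):

```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Connect to a node's RPC endpoint (address assumed).
	client, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	// Start the websocket machinery so subscriptions can be delivered.
	if err := client.Start(); err != nil {
		panic(err)
	}
	defer client.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Subscribe to block-sync status events; the query mirrors
	// EventQueryBlockSyncStatus and is an assumption here.
	events, err := client.Subscribe(ctx, "demo-client", "tm.event = 'BlockSyncStatus'")
	if err != nil {
		panic(err)
	}
	for ev := range events {
		fmt.Printf("block sync status event: %+v\n", ev.Data)
	}
}
```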
+ +## Implementation + +To read more on the implementation, please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md) diff --git a/docs/tendermint-core/block-sync/img/bc-reactor-routines.png b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png new file mode 100644 index 000000000..3f574a79b Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png differ diff --git a/docs/tendermint-core/block-sync/img/bc-reactor.png b/docs/tendermint-core/block-sync/img/bc-reactor.png new file mode 100644 index 000000000..f7fe0f819 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor.png differ diff --git a/docs/tendermint-core/block-sync/implementation.md b/docs/tendermint-core/block-sync/implementation.md new file mode 100644 index 000000000..59274782c --- /dev/null +++ b/docs/tendermint-core/block-sync/implementation.md @@ -0,0 +1,47 @@ +--- +order: 3 +--- + +# Implementation + +## Blocksync Reactor + +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates replaying blocks to the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation + +### poolRoutine + +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh; the block reactor then sends + a &bcBlockRequestMessage for a specific height + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, giving them to the app and storing + them on disk +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer + +## Block Pool + +- responsible for downloading blocks from peers +- makeRequestersRoutine() + - removes timed-out peers + - starts new requesters by calling makeNextRequester() +- requestRoutine(): + - picks a peer and sends the request, then blocks until: + - pool is stopped by listening to pool.Quit + - requester is stopped by listening to Quit + - request is redone + - we receive a block + - gotBlockCh is strange + +## Go Routines in Blocksync Reactor + +![Go Routines Diagram](img/bc-reactor-routines.png) diff --git a/docs/tendermint-core/block-sync/reactor.md b/docs/tendermint-core/block-sync/reactor.md new file mode 100644 index 000000000..3e2875340 --- /dev/null +++ b/docs/tendermint-core/block-sync/reactor.md @@ -0,0 +1,278 @@ +--- +order: 2 +--- +# Reactor + +The Blocksync Reactor's high-level responsibility is to enable peers who are +far behind the current state of the consensus to quickly catch up by downloading +many blocks in parallel, verifying their commits, and executing them against the +ABCI application. + +Tendermint full nodes run the Blocksync Reactor as a service to provide blocks +to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode, +where they actively make requests for more blocks until they sync up. +Once caught up, "fast_sync" mode is disabled and the node switches to +using (and turns on) the Consensus Reactor.
+ +## Architecture and algorithm + +The Blocksync reactor is organised as a set of concurrent tasks: + +- Receive routine of Blocksync Reactor +- Task for creating Requesters +- Set of Requester tasks +- Controller task + +![Blocksync Reactor Architecture Diagram](img/bc-reactor.png) + +### Data structures + +These are the core data structures necessary to provide the Blocksync Reactor logic. + +The Requester data structure is used to track the assignment of a request for the `block` at position `height` to a peer whose id equals `peerID`. + +```go +type Requester struct { + mtx Mutex + block Block + height int64 + peerID p2p.ID + redoChannel chan p2p.ID // redo may be sent multiple times; peerID is used to identify repeats +} +``` + +Pool is a core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and the number of pending requests for each peer (`peers`), the maximum peer height, etc. + +```go +type Pool struct { + mtx Mutex + requesters map[int64]*Requester + height int64 + peers map[p2p.ID]*Peer + maxPeerHeight int64 + numPending int32 + store BlockStore + requestsChannel chan<- BlockRequest + errorsChannel chan<- peerError +} +``` + +The Peer data structure stores, for each peer, the current `height` and the number of pending requests sent to the peer (`numPending`), etc. + +```go +type Peer struct { + id p2p.ID + height int64 + numPending int32 + timeout *time.Timer + didTimeout bool +} +``` + +BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`). + +```go +type BlockRequest struct { + Height int64 + PeerID p2p.ID +} +``` + +### Receive routine of Blocksync Reactor + +It is executed upon message reception on the BlocksyncChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a receive routine of the Blocksync Reactor) executed for each peer. Note that "try to send" will not block (it returns immediately) if the outgoing buffer is full. + +```go +handleMsg(pool, m): + upon receiving bcBlockRequestMessage m from peer p: + block = load block for height m.Height from pool.store + if block != nil then + try to send BlockResponseMessage(block) to p + else + try to send bcNoBlockResponseMessage(m.Height) to p + + upon receiving bcBlockResponseMessage m from peer p: + pool.mtx.Lock() + requester = pool.requesters[m.Height] + if requester == nil then + error("peer sent us a block we didn't expect") + continue + + if requester.block == nil and requester.peerID == p then + requester.block = m + pool.numPending -= 1 // atomic decrement + peer = pool.peers[p] + if peer != nil then + peer.numPending-- + if peer.numPending == 0 then + peer.timeout.Stop() + // NOTE: we don't send Quit signal to the corresponding requester task!
+ else + trigger peer timeout to expire after peerTimeout + pool.mtx.Unlock() + + + upon receiving bcStatusRequestMessage m from peer p: + try to send bcStatusResponseMessage(pool.store.Height) + + upon receiving bcStatusResponseMessage m from peer p: + pool.mtx.Lock() + peer = pool.peers[p] + if peer != nil then + peer.height = m.height + else + peer = create new Peer data structure with id = p and height = m.Height + pool.peers[p] = peer + + if m.Height > pool.maxPeerHeight then + pool.maxPeerHeight = m.Height + pool.mtx.Unlock() + +onTimeout(p): + send error message to pool error channel + peer = pool.peers[p] + peer.didTimeout = true +``` + +### Requester tasks + +A Requester task is responsible for fetching a single block at position `height`. + +```go +fetchBlock(height, pool): + while true do { + peerID = nil + block = nil + peer = pickAvailablePeer(height) + peerID = peer.id + + enqueue BlockRequest(height, peerID) to pool.requestsChannel + redo = false + while !redo do + select { + upon receiving Quit message do + return + upon receiving redo message with id on redoChannel do + if peerID == id { + mtx.Lock() + pool.numPending++ + redo = true + mtx.Unlock() + } + } + } + +pickAvailablePeer(height): + selectedPeer = nil + while selectedPeer == nil do + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then + peer.numPending++ + selectedPeer = peer + break + pool.mtx.Unlock() + + if selectedPeer == nil then + sleep requestIntervalMS + + return selectedPeer +``` + +### Task for creating Requesters + +This task is responsible for continuously creating and starting Requester tasks. + +```go +createRequesters(pool): + while true do + if !pool.isRunning then break + if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then + pool.mtx.Lock() + nextHeight = pool.height + size(pool.requesters) + requester = create new requester for height nextHeight + pool.requesters[nextHeight] = requester + pool.numPending += 1 // atomic increment + start requester task + pool.mtx.Unlock() + else + sleep requestIntervalMS + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then + send error on pool error channel + peer.didTimeout = true + if peer.didTimeout then + for each requester in pool.requesters do + if requester.getPeerID() == peer then + enqueue msg on requester's redoChannel + delete(pool.peers, peerID) + pool.mtx.Unlock() +``` + +### Main blocksync reactor controller task + +```go +main(pool): + create trySyncTicker with interval trySyncIntervalMS + create statusUpdateTicker with interval statusUpdateIntervalSeconds + create switchToConsensusTicker with interval switchToConsensusIntervalSeconds + + while true do + select { + upon receiving BlockRequest(Height, Peer) on pool.requestsChannel: + try to send bcBlockRequestMessage(Height) to Peer + + upon receiving error(peer) on errorsChannel: + stop peer for error + + upon receiving message on statusUpdateTickerChannel: + broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine + + upon receiving message on switchToConsensusTickerChannel: + pool.mtx.Lock() + receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds + ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight + haveSomePeers = size of pool.peers > 0 + 
pool.mtx.Unlock() + if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then + switch to consensus mode + + upon receiving message on trySyncTickerChannel: + for i = 0; i < 10; i++ do + pool.mtx.Lock() + firstBlock = pool.requesters[pool.height].block + secondBlock = pool.requesters[pool.height+1].block + if firstBlock == nil or secondBlock == nil then continue + pool.mtx.Unlock() + verify firstBlock using LastCommit from secondBlock + if verification failed + pool.mtx.Lock() + peerID = pool.requesters[pool.height].peerID + redoRequestsForPeer(peerId) + delete(pool.peers, peerID) + stop peer peerID for error + pool.mtx.Unlock() + else + delete(pool.requesters, pool.height) + save firstBlock to store + pool.height++ + execute firstBlock + } + +redoRequestsForPeer(pool, peerId): + for each requester in pool.requesters do + if requester.getPeerID() == peerID + enqueue msg on redoChannel for requester +``` + +## Channels + +Defines `maxMsgSize` for the maximum size of incoming messages, +`SendQueueCapacity` and `RecvBufferCapacity` for maximum sending and +receiving buffers respectively. These are supposed to prevent amplification +attacks by setting an upper limit on how much data we can receive & send to +a peer. + +Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md new file mode 100644 index 000000000..bd7def551 --- /dev/null +++ b/docs/tendermint-core/consensus/README.md @@ -0,0 +1,42 @@ +--- +order: 1 +parent: + title: Consensus + order: 6 +--- + +# Consensus + +Tendermint Consensus is a distributed protocol executed by validator processes to agree on +the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where +each round is an attempt to reach agreement on the next block. A round starts by having a dedicated +process (called the proposer) suggest to the other processes, with the `ProposalMessage`, what the +next block should be. +The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote +messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the +next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some +round, enough processes vote for the same block, then this block is committed and later +added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the +validator. The internals of the protocol and how it ensures safety and liveness properties are +explained in a forthcoming document. + +For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the +block, as blocks can be large, i.e., they don't embed the block inside `Proposal` and +`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the +[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section) +that uniquely identifies each block. The block itself is +disseminated to validator processes using a peer-to-peer gossiping protocol: the +proposer first splits the block into a number of block parts, which are then gossiped between +processes using `BlockPartMessage`. + +Validators in Tendermint communicate via a peer-to-peer gossiping protocol. Each validator is connected +only to a subset of processes called peers.
diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md
new file mode 100644
index 000000000..bd7def551
--- /dev/null
+++ b/docs/tendermint-core/consensus/README.md
@@ -0,0 +1,42 @@
+---
+order: 1
+parent:
+  title: Consensus
+  order: 6
+---
+
+# Consensus
+
+Tendermint Consensus is a distributed protocol executed by validator processes to agree on
+the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
+each round is an attempt to reach agreement on the next block. A round starts by having a dedicated
+process (called the proposer) suggest to the other processes what the next block should be with
+the `ProposalMessage`.
+The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
+messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
+next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some
+round, enough processes vote for the same block, then this block is committed and later
+added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
+validator. The internals of the protocol and how it ensures safety and liveness properties are
+explained in a forthcoming document.
+
+For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
+block, as blocks can be large; i.e., they don't embed the block inside `Proposal` and
+`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
+[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
+that uniquely identifies each block. The block itself is
+disseminated to validator processes using a peer-to-peer gossip protocol: the
+proposer first splits a block into a number of block parts, which are then gossiped between
+processes using `BlockPartMessage`.
+
+Validators in Tendermint communicate over a peer-to-peer gossip protocol. Each validator is connected
+only to a subset of processes called peers. Through the gossip protocol, a validator sends its peers
+all the information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) they need to
+reach agreement on some block, and also to obtain the content of the chosen block (block parts). As
+part of the gossip protocol, processes also send auxiliary messages that inform peers about the
+executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
+also messages that inform peers what votes the process has seen (`HasVoteMessage`,
+`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossip
+protocol to determine what messages a process should send to its peers.
+
+We now describe the content of each message exchanged during the Tendermint consensus protocol.
diff --git a/docs/tendermint-core/consensus/reactor.md b/docs/tendermint-core/consensus/reactor.md
new file mode 100644
index 000000000..ee43846ec
--- /dev/null
+++ b/docs/tendermint-core/consensus/reactor.md
@@ -0,0 +1,370 @@
+---
+order: 2
+---
+
+# Reactor
+
+The Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When the Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in the gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, the Consensus Reactor is responsible
+for decoding messages received from a peer and for adequate processing of each message depending on its type and content.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of these separate units of execution
+that are part of the Consensus Reactor.
+
+## ConsensusState service
+
+Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+and upon reaching agreement, commits blocks to the chain and executes them against the application.
+The internal state machine receives input from peers, the internal validator and from a timer.
+
+Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
+Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
+by the Receive Routine.
+
+### Receive Routine of the ConsensusState service
+
+The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
+It is the only routine that updates RoundState, which contains the internal consensus state.
+Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+It receives messages from peers, internal validators and from the Timeout Ticker
+and invokes the corresponding handlers, potentially updating the RoundState.
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
+discussed in a separate document. For this document it is sufficient to know
+that the Receive Routine manages and updates the RoundState data structure that is
+then extensively used by the gossip routines to determine what information should be sent to peer processes.
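+
+A condensed sketch of the Receive Routine's event loop, with hypothetical
+channel and handler names standing in for the real ones (WAL writes and error
+handling omitted):
+
+```go
+type msgInfo struct{ Msg interface{}; PeerID string }
+type timeoutInfo struct{ Height int64; Round int32; Step uint8 }
+
+type State struct {
+	peerMsgQueue     chan msgInfo
+	internalMsgQueue chan msgInfo
+	timeoutCh        chan timeoutInfo
+	quit             chan struct{}
+}
+
+func (cs *State) handleMsg(m msgInfo)          { /* update RoundState */ }
+func (cs *State) handleTimeout(ti timeoutInfo) { /* advance height/round/step */ }
+
+func (cs *State) receiveRoutine() {
+	for {
+		select {
+		case m := <-cs.peerMsgQueue:
+			// Proposals, block parts and votes forwarded by the reactor.
+			cs.handleMsg(m)
+		case m := <-cs.internalMsgQueue:
+			// Proposals and votes produced by our own validator.
+			cs.handleMsg(m)
+		case ti := <-cs.timeoutCh:
+			// Timeouts scheduled by the Timeout Ticker.
+			cs.handleTimeout(ti)
+		case <-cs.quit:
+			return
+		}
+	}
+}
+```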
+
+## Round State
+
+RoundState defines the internal consensus state. It contains the height, round, round step, the current validator set,
+the proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
+received votes, and the last commit and last validator set.
+
+```go
+type RoundState struct {
+	Height             int64
+	Round              int
+	Step               RoundStepType
+	Validators         ValidatorSet
+	Proposal           Proposal
+	ProposalBlock      Block
+	ProposalBlockParts PartSet
+	LockedRound        int
+	LockedBlock        Block
+	LockedBlockParts   PartSet
+	Votes              HeightVoteSet
+	LastCommit         VoteSet
+	LastValidators     ValidatorSet
+}
+```
+
+Internally, consensus will run as a state machine with the following states:
+
+- RoundStepNewHeight
+- RoundStepNewRound
+- RoundStepPropose
+- RoundStepProposeWait
+- RoundStepPrevote
+- RoundStepPrevoteWait
+- RoundStepPrecommit
+- RoundStepPrecommitWait
+- RoundStepCommit
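+
+One way to encode these steps in Go is shown below; the concrete numeric values
+are illustrative, not the implementation's:
+
+```go
+type RoundStepType uint8
+
+const (
+	RoundStepNewHeight RoundStepType = iota + 1
+	RoundStepNewRound
+	RoundStepPropose
+	RoundStepProposeWait
+	RoundStepPrevote
+	RoundStepPrevoteWait
+	RoundStepPrecommit
+	RoundStepPrecommitWait
+	RoundStepCommit
+)
+```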
+
+## Peer Round State
+
+The peer round state contains the known state of a peer. It is updated by the Receive routine of the
+Consensus Reactor and by the gossip routines upon sending a message to the peer.
+
+```golang
+type PeerRoundState struct {
+	Height                   int64         // Height peer is at
+	Round                    int           // Round peer is at, -1 if unknown.
+	Step                     RoundStepType // Step peer is at
+	Proposal                 bool          // True if peer has proposal for this round
+	ProposalBlockPartsHeader PartSetHeader
+	ProposalBlockParts       BitArray
+	ProposalPOLRound         int      // Proposal's POL round. -1 if none.
+	ProposalPOL              BitArray // nil until ProposalPOLMessage received.
+	Prevotes                 BitArray // All votes peer has for this round
+	Precommits               BitArray // All precommits peer has for this round
+	LastCommitRound          int      // Round of commit for last height. -1 if none.
+	LastCommit               BitArray // All commit precommits of commit for last height.
+	CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
+	CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
+}
+```
+
+## Receive method of Consensus reactor
+
+The entry point of the Consensus reactor is the receive method. When a message is
+received from a peer p, normally the peer round state is updated
+correspondingly, and some messages are passed for further processing, for
+example to the ConsensusState service. We now specify the processing of messages in
+the receive method of the Consensus reactor for each message type. In the following
+message handlers, `rs` and `prs` denote `RoundState` and `PeerRoundState`,
+respectively.
+
+### NewRoundStepMessage handler
+
+```go
+handleMessage(msg):
+  if msg is from smaller height/round/step then return
+  // Just remember these values.
+  prsHeight = prs.Height
+  prsRound = prs.Round
+  prsCatchupCommitRound = prs.CatchupCommitRound
+  prsCatchupCommit = prs.CatchupCommit
+
+  Update prs with values from msg
+  if prs.Height or prs.Round has been updated then
+    reset Proposal related fields of the peer state
+  if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
+    prs.Precommits = prsCatchupCommit
+  if prs.Height has been updated then
+    if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
+      prs.LastCommitRound = msg.LastCommitRound
+      prs.LastCommit = prs.Precommits
+    else
+      prs.LastCommitRound = msg.LastCommitRound
+      prs.LastCommit = nil
+    Reset prs.CatchupCommitRound and prs.CatchupCommit
+```
+
+### NewValidBlockMessage handler
+
+```go
+handleMessage(msg):
+  if prs.Height != msg.Height then return
+
+  if prs.Round != msg.Round && !msg.IsCommit then return
+
+  prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+  prs.ProposalBlockParts = msg.BlockParts
+```
+
+The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to
+protect the node against DOS attacks.
+
+### HasVoteMessage handler
+
+```go
+handleMessage(msg):
+  if prs.Height == msg.Height then
+    prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
+```
+
+### VoteSetMaj23Message handler
+
+```go
+handleMessage(msg):
+  if prs.Height == msg.Height then
+    Record in rs that the peer claims to have a 2/3 majority for msg.BlockID
+    Send VoteSetBitsMessage showing the votes this node has for that BlockID
+```
+
+### ProposalMessage handler
+
+```go
+handleMessage(msg):
+  if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+  prs.Proposal = true
+  if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
+    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+  prs.ProposalPOLRound = msg.POLRound
+  prs.ProposalPOL = nil
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### ProposalPOLMessage handler
+
+```go
+handleMessage(msg):
+  if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
+  prs.ProposalPOL = msg.ProposalPOL
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DOS attacks.
+
+### BlockPartMessage handler
+
+```go
+handleMessage(msg):
+  if prs.Height != msg.Height || prs.Round != msg.Round then return
+  Record in prs that the peer has block part msg.Part.Index
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteMessage handler
+
+```go
+handleMessage(msg):
+  Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the particular height and round
+  Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteSetBitsMessage handler
+
+```go
+handleMessage(msg):
+  Update prs with the bit-array of votes the peer claims to have for msg.BlockID
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DOS attacks.
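+
+Putting the handlers together, the receive method is essentially a dispatch on
+the message type. The sketch below uses stand-in message and state types and
+omits validation; it is illustrative, not the actual implementation:
+
+```go
+type Message interface{}
+type NewRoundStepMessage struct{ Height int64; Round int32 }
+type HasVoteMessage struct{ Height int64; Round int32; Type byte; Index int32 }
+type VoteMessage struct{ Height int64 }
+
+type peerState struct{ /* the PeerRoundState kept for this peer */ }
+
+func (ps *peerState) applyNewRoundStep(m *NewRoundStepMessage) {}
+func (ps *peerState) setHasVote(m *HasVoteMessage)             {}
+
+type msgInfo struct {
+	Msg    Message
+	PeerID string
+}
+
+type Reactor struct {
+	peerMsgQueue chan msgInfo // consumed by the ConsensusState service
+}
+
+func (conR *Reactor) receive(ps *peerState, peerID string, msg Message) {
+	switch m := msg.(type) {
+	case *NewRoundStepMessage:
+		// State-only message: just update the peer round state.
+		ps.applyNewRoundStep(m)
+	case *HasVoteMessage:
+		ps.setHasVote(m)
+	case *VoteMessage:
+		// Update ps (omitted), then forward to ConsensusState for processing.
+		conR.peerMsgQueue <- msgInfo{Msg: m, PeerID: peerID}
+	}
+}
+```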
+
+## Gossip Data Routine
+
+It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
+`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+      if send returns true, record that the peer knows the corresponding block Part
+      Continue
+
+1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
+      help peer catch up using gossipDataForCatchup function
+      Continue
+
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+      Sleep PeerGossipSleepDuration
+      Continue
+
+// at this point rs.Height == prs.Height and rs.Round == prs.Round
+1d) if (rs.Proposal != nil and !prs.Proposal) then
+      Send ProposalMessage(rs.Proposal) to the peer
+      if send returns true, record that the peer knows Proposal
+      if 0 <= rs.Proposal.POLRound then
+        polRound = rs.Proposal.POLRound
+        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+        Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
+      Continue
+
+2)  Sleep PeerGossipSleepDuration
+```
+
+### Gossip Data For Catchup
+
+This function is responsible for helping the peer catch up if it is at a smaller height (prs.Height < rs.Height).
+The function executes the following logic:
+
+```go
+  if peer does not have all block parts for prs.ProposalBlockParts then
+    blockMeta = Load Block Metadata for height prs.Height from blockStore
+    if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
+      Sleep PeerGossipSleepDuration
+      return
+    Part = pick a random proposal block part the peer does not have
+    Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+    if send returns true, record that the peer knows the corresponding block Part
+    return
+  else Sleep PeerGossipSleepDuration
+```
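+
+Both branches above need to "pick a random proposal block part the peer does
+not have". A self-contained sketch of that selection, with plain bool slices
+standing in for the `BitArray` type:
+
+```go
+import "math/rand"
+
+// pickPartToSend returns the index of a random part that we have and the
+// peer lacks, or false if the peer already has everything we have.
+func pickPartToSend(ourParts, peerParts []bool) (int, bool) {
+	var candidates []int
+	for i, have := range ourParts {
+		peerHas := i < len(peerParts) && peerParts[i]
+		if have && !peerHas {
+			candidates = append(candidates, i)
+		}
+	}
+	if len(candidates) == 0 {
+		return 0, false
+	}
+	return candidates[rand.Intn(len(candidates))], true
+}
+```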
+
+## Gossip Votes Routine
+
+It is used to send the following message: `VoteMessage` on the VoteChannel.
+The gossip votes routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.Height == prs.Height then
+      if prs.Step == RoundStepNewHeight then
+        vote = random vote from rs.LastCommit the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
+        Prevotes = rs.Votes.Prevotes(prs.Round)
+        vote = random vote from Prevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
+        Precommits = rs.Votes.Precommits(prs.Round)
+        vote = random vote from Precommits the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+      if prs.ProposalPOLRound != -1 then
+        PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+        vote = random vote from PolPrevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue
+
+1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
+      vote = random vote from rs.LastCommit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
+      Commit = get commit from BlockStore for prs.Height
+      vote = random vote from Commit the peer does not have
+      Send VoteMessage(vote) to the peer
+      if send returns true, continue
+
+2)  Sleep PeerGossipSleepDuration
+```
+
+## QueryMaj23Routine
+
+It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
+BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
+(`prs`). The routine repeats forever the logic shown below.
+
+```go
+1a) if rs.Height == prs.Height then
+      Prevotes = rs.Votes.Prevotes(prs.Round)
+      if there is a 2/3 majority for some blockID in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockID)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1b) if rs.Height == prs.Height then
+      Precommits = rs.Votes.Precommits(prs.Round)
+      if there is a 2/3 majority for some blockID in Precommits then
+        m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockID)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
+      Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+      if there is a 2/3 majority for some blockID in Prevotes then
+        m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockID)
+        Send m to peer
+        Sleep PeerQueryMaj23SleepDuration
+
+1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
+      prs.Height <= blockStore.Height() then
+      Commit = LoadCommit(prs.Height)
+      m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.BlockID)
+      Send m to peer
+      Sleep PeerQueryMaj23SleepDuration
+
+2)  Sleep PeerQueryMaj23SleepDuration
+```
+
+## Broadcast routine
+
+The Broadcast routine subscribes to an internal event bus to receive new round steps and votes messages, and broadcasts messages to peers upon receiving those
+events.
+It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
+broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
+Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
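+
+The "2/3 majority" tests above are computed over voting power, not a simple
+vote count. A minimal sketch of that test, with illustrative types:
+
+```go
+// maj23Tally accumulates, per BlockID, the voting power of the validators
+// that signed for it.
+type maj23Tally struct {
+	totalPower int64
+	byBlockID  map[string]int64
+}
+
+// twoThirdsMajority returns the BlockID with more than 2/3 of the total
+// voting power, if one exists.
+func (t *maj23Tally) twoThirdsMajority() (string, bool) {
+	quorum := t.totalPower*2/3 + 1
+	for blockID, power := range t.byBlockID {
+		if power >= quorum {
+			return blockID, true
+		}
+	}
+	return "", false
+}
+```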
+
+## Channels
+
+Defines 4 channels: state, data, vote and vote_set_bits. Each channel
+has `SendQueueCapacity`, `RecvBufferCapacity` and
+`RecvMessageCapacity` set to `maxMsgSize`.
+
+Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/tendermint-core/evidence/README.md b/docs/tendermint-core/evidence/README.md
new file mode 100644
index 000000000..2070c48c0
--- /dev/null
+++ b/docs/tendermint-core/evidence/README.md
@@ -0,0 +1,13 @@
+---
+order: 1
+parent:
+  title: Evidence
+  order: 3
+---
+
+Evidence is used to identify validators who have acted or are acting maliciously. There are multiple types of evidence; to read more about the evidence types, please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence).
+
+The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is repeatedly sent to all the peers. This ensures evidence is sent to as many peers as possible to avoid censorship. After evidence is received by peers and committed in a block, it is pruned from the evidence module.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/docs/tendermint-core/mempool.md b/docs/tendermint-core/mempool.md
deleted file mode 100644
index 8dd968781..000000000
--- a/docs/tendermint-core/mempool.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-order: 12
----
-
-# Mempool
-
-## Transaction ordering
-
-Currently, there's no ordering of transactions other than the order they've
-arrived (via RPC or from other nodes).
-
-So the only way to specify the order is to send them to a single node.
-
-valA:
-
-- `tx1`
-- `tx2`
-- `tx3`
-
-If the transactions are split up across different nodes, there's no way to
-ensure they are processed in the expected order.
-
-valA:
-
-- `tx1`
-- `tx2`
-
-valB:
-
-- `tx3`
-
-If valB is the proposer, the order might be:
-
-- `tx3`
-- `tx1`
-- `tx2`
-
-If valA is the proposer, the order might be:
-
-- `tx1`
-- `tx2`
-- `tx3`
-
-That said, if the transactions contain some internal value, like an
-order/nonce/sequence number, the application can reject transactions that are
-out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then
-accept `tx1`. The sender can then retry sending `tx3`, which should probably be
-rejected until the node has seen `tx2`.
diff --git a/docs/tendermint-core/mempool/README.md b/docs/tendermint-core/mempool/README.md
new file mode 100644
index 000000000..1821cf849
--- /dev/null
+++ b/docs/tendermint-core/mempool/README.md
@@ -0,0 +1,71 @@
+---
+order: 1
+parent:
+  title: Mempool
+  order: 2
+---
+
+The mempool is an in-memory pool of potentially valid transactions,
+both to broadcast to other nodes, as well as to provide to the
+consensus reactor when it is selected as the block proposer.
+
+There are two sides to the mempool state:
+
+- External: get, check, and broadcast new transactions
+- Internal: return valid transactions, update the list after a block commit
+
+## External functionality
+
+External functionality is exposed via network interfaces
+to potentially untrusted actors.
+
+- CheckTx - triggered via RPC or P2P
+- Broadcast - gossip messages after a successful check
+
+## Internal functionality
+
+Internal functionality is exposed via method calls to other
+code compiled into the tendermint binary. A sketch of these methods as a Go
+interface follows the list below.
+
+- ReapMaxBytesMaxGas - get txs to propose in the next block. Guarantees that the
+  size of the txs is less than MaxBytes, and gas is less than MaxGas
+- Update - remove txs that were included in the last block
+- ABCI.CheckTx - call the ABCI app to validate the tx
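+
+A simplified sketch of these internal methods as a Go interface; the
+signatures here are illustrative and condensed relative to the real `Mempool`
+interface in the mempool package:
+
+```go
+type Mempool interface {
+	// CheckTx runs the tx through the ABCI application's CheckTx and, if it
+	// passes, adds the tx to the pool.
+	CheckTx(tx []byte) error
+
+	// ReapMaxBytesMaxGas returns txs for the next proposal, keeping the
+	// cumulative size under maxBytes and cumulative gas under maxGas.
+	ReapMaxBytesMaxGas(maxBytes, maxGas int64) [][]byte
+
+	// Update removes the txs included in the committed block and, if recheck
+	// is enabled, reruns CheckTx on the remaining txs.
+	Update(height int64, committedTxs [][]byte) error
+}
+```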
+
+What does it provide the consensus reactor?
+What guarantees does it need from the ABCI app?
+(talk about interleaving processes in concurrency)
+
+## Optimizations
+
+The implementation within this library also implements a tx cache.
+This is so that signatures don't have to be reverified if the tx has
+already been seen before.
+However, we only store valid txs in the cache, not invalid ones.
+This is because invalid txs could become valid later.
+Txs that are included in a block aren't removed from the cache,
+as they still may be getting received over the p2p network.
+These txs are stored in the cache by their hash, to mitigate memory concerns.
+
+Applications should implement replay protection; read [Replay
+Protection](https://github.com/tendermint/tendermint/blob/8cdaa7f515a9d366bbc9f0aff2a263a1a6392ead/docs/app-dev/app-development.md#replay-protection) for more information.
+
+## Configuration
+
+The mempool has various configurable parameters; see [Configuration](./config.md)
+for the details.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
+
+`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead).
+`MaxBatchBytes` is a mempool config parameter, defined locally. The reactor
+sends transactions to the connected peers in batches. The maximum size of one
+batch is `MaxBatchBytes`.
+
+The mempool will not send a tx back to any peer from which it received that tx.
+
+The reactor assigns a `uint16` number to each peer and maintains a map from
+p2p.ID to `uint16`. Each mempool transaction carries a list of all its senders
+(`[]uint16`). The list is updated every time the mempool receives a transaction it
+has already seen. Using `uint16` assumes that a node will never have over 65535 active
+peers (0 is reserved for an unknown source - e.g. RPC).
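+
+A sketch of that sender bookkeeping; the names are illustrative, not the
+actual implementation:
+
+```go
+type senderTracker struct {
+	nextID  uint16
+	peerIDs map[string]uint16   // p2p ID -> compact peer number
+	senders map[string][]uint16 // tx hash -> peers the tx was received from
+}
+
+// markSeenFrom records that the tx was received from the given peer,
+// assigning the peer a compact id on first contact.
+func (st *senderTracker) markSeenFrom(txHash, peerID string) {
+	id, ok := st.peerIDs[peerID]
+	if !ok {
+		st.nextID++ // 0 stays reserved for unknown sources such as RPC
+		id = st.nextID
+		st.peerIDs[peerID] = id
+	}
+	st.senders[txHash] = append(st.senders[txHash], id)
+}
+```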
diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md
new file mode 100644
index 000000000..4e8a9ec73
--- /dev/null
+++ b/docs/tendermint-core/mempool/config.md
@@ -0,0 +1,105 @@
+---
+order: 2
+---
+
+# Configuration
+
+Here we describe the configuration options around the mempool.
+For the purposes of this document, they are described
+in a toml file, but some of them can also be passed in as
+environment variables.
+
+Config:
+
+```toml
+[mempool]
+
+recheck = true
+broadcast = true
+wal-dir = ""
+
+# Maximum number of transactions in the mempool
+size = 5000
+
+# Limit the total size of all txs in the mempool.
+# This only accounts for raw transactions (e.g. given 1MB transactions and
+# max-txs-bytes=5MB, mempool will only accept 5 transactions).
+max-txs-bytes = 1073741824
+
+# Size of the cache (used to filter transactions we saw earlier) in transactions
+cache-size = 10000
+
+# Do not remove invalid transactions from the cache (default: false)
+# Set to true if it's not possible for any invalid transaction to become valid
+# again in the future.
+keep-invalid-txs-in-cache = false
+
+# Maximum size of a single transaction.
+# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
+max-tx-bytes = 1048576
+
+# Maximum size of a batch of transactions to send to a peer
+# Including space needed by encoding (one varint per transaction).
+# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
+max-batch-bytes = 0
+```
+
+## Recheck
+
+Recheck determines if the mempool rechecks all pending
+transactions after a block was committed. Once a block
+is committed, the mempool removes all valid transactions
+that were successfully included in the block.
+
+If `recheck` is true, then it will rerun CheckTx on
+all remaining transactions with the new block state.
+
+## Broadcast
+
+Determines whether this node gossips any valid transactions
+that arrive in the mempool. The default is to gossip anything that
+passes CheckTx. If this is disabled, transactions are not
+gossiped, but instead stored locally and added to the next
+block for which this node is the proposer.
+
+## WalDir
+
+This defines the directory where the mempool writes its write-ahead
+logs. These files can be used to reload unbroadcasted
+transactions if the node crashes.
+
+If the directory passed in is an absolute path, the wal file is
+created there. If the directory is a relative path, the path is
+appended to the home directory of the tendermint process to
+generate an absolute path to the wal directory
+(default `$HOME/.tendermint`, or set via `TM_HOME` or `--home`).
+
+## Size
+
+Size defines the maximum number of transactions stored in the mempool. The default is `5_000`, but it can be adjusted to any number you would like. The higher the size, the more strain on the node.
+
+## Max Transactions Bytes
+
+Max transactions bytes defines the maximum total size of all the transactions in the mempool. The default is 1 GB.
+
+## Cache size
+
+Cache size determines the size of the cache holding transactions we have already seen. The cache exists to avoid rerunning `checktx` each time the same transaction is received.
+
+## Keep Invalid Transactions In Cache
+
+Keep invalid transactions in cache determines whether an invalid transaction should be evicted from the cache. An invalid transaction here may simply rely on a different tx that has not yet been included in a block.
+
+## Max Transaction Bytes
+
+Max transaction bytes defines the maximum size a transaction can be for your node. If you would like your node to only keep track of smaller transactions, this field would need to be changed. The default is 1MB.
+
+## Max Batch Bytes
+
+Max batch bytes defines the maximum number of bytes the node will send to a peer in one batch. The default is 0.
+
+> Note: Unused due to https://github.com/tendermint/tendermint/issues/5796
diff --git a/docs/tendermint-core/pex/README.md b/docs/tendermint-core/pex/README.md
new file mode 100644
index 000000000..5f5c3ed42
--- /dev/null
+++ b/docs/tendermint-core/pex/README.md
@@ -0,0 +1,177 @@
+---
+order: 1
+parent:
+  title: Peer Exchange
+  order: 5
+---
+
+# Peer Strategy and Exchange
+
+Here we outline the design of the PeerStore
+and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
+to good peers and to gossip peers to others.
+
+## Peer Types
+
+Certain peers are special in that they are specified by the user as `persistent`,
+which means we auto-redial them if the connection fails, or if we fail to dial
+them.
+Some peers can be marked as `private`, which means
+we will not put them in the peer store or gossip them to others.
+
+All peers except private peers and peers coming from them are tracked using the
+peer store.
+
+The rest of our peers are only distinguished by being either
+inbound (they dialed our public address) or outbound (we dialed them).
+
+## Discovery
+
+Peer discovery begins with a list of seeds.
+
+When we don't have enough peers, we
+
+1. ask existing peers
+2. dial seeds if we're not dialing anyone currently
+
+On startup, we will also immediately dial the given list of `persistent_peers`,
+and will attempt to maintain persistent connections with them. If the
+connections die, or we fail to dial, we will redial every 5s for a few minutes,
+then switch to an exponential backoff schedule, and after about a day of
+trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period` is configured to zero.
+
+But if `persistent_peers_max_dial_period` is set greater than zero, the period between dials to each persistent peer
+will not exceed `persistent_peers_max_dial_period` during exponential backoff.
+Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`),
+and we keep trying regardless of `maxAttemptsToDial`.
+
+As long as we have less than `MaxNumOutboundPeers`, we periodically request
+additional peers from each of our own peers and try the seeds.
+
+## Listening
+
+Peers listen on a configurable ListenAddr that they self-report in their
+NodeInfo during handshakes with other peers. Peers accept up to
+`MaxNumInboundPeers` incoming peers.
+
+## Address Book
+
+Peers are tracked via their ID (their PubKey.Address()).
+Peers are added to the peer store from the PEX when they first connect to us or
+when we hear about them from other peers.
+
+The peer store is arranged in sets of buckets, and distinguishes between
+vetted (old) and unvetted (new) peers. It keeps different sets of buckets for
+vetted and unvetted peers. Buckets provide randomization over peer selection.
+Peers are put in buckets according to their IP groups.
+
+An IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`), or `local` for
+local addresses, or `unroutable` for unroutable addresses. The mask which
+corresponds to the `/16` subnet is used for IPv4, and the `/32` subnet for IPv6.
+Each group has a limited number of buckets to prevent DoS attacks coming from
+that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS
+attack).
+
+[highwayhash](https://arxiv.org/abs/1612.06257) is used as the hashing function
+when calculating a bucket.
+
+When placing a peer into a new bucket:
+
+```md
+hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets
+```
+
+When placing a peer into an old bucket:
+
+```md
+hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
+```
+
+where `key` is a random 24-character hex string, `group` is the IP group of the peer (e.g. `1.2.0.0`),
+`sourcegroup` is the IP group of the sender (the peer who sent us this address, e.g. `174.11.0.0`),
+and `addr` is the string representation of the peer's address (e.g. `174.11.10.2:26656`).
+
+A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
+each instance of the peer can have a different IP:PORT.
+
+If we're trying to add a new peer but there's no space in its bucket, we'll
+remove the worst peer from that bucket to make room.
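+
+A compilable sketch of the new-bucket computation, with FNV standing in for
+the keyed highwayhash the real address book uses:
+
+```go
+import (
+	"hash/fnv"
+	"strconv"
+)
+
+func hash64(s string) uint64 {
+	h := fnv.New64a()
+	h.Write([]byte(s))
+	return h.Sum64()
+}
+
+// newBucketIndex mirrors the formula above: an inner hash picks a slot
+// within the source group, and an outer hash maps it onto the new buckets.
+func newBucketIndex(key, group, sourceGroup string, bucketsPerGroup, numNewBuckets uint64) uint64 {
+	inner := hash64(key+group+sourceGroup) % bucketsPerGroup
+	return hash64(key+sourceGroup+strconv.FormatUint(inner, 10)) % numNewBuckets
+}
+```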
+
+## Vetting
+
+When a peer is first added, it is unvetted.
+Marking a peer as vetted is outside the scope of the `p2p` package.
+For Tendermint, a peer becomes vetted once it has contributed sufficiently
+at the consensus layer; i.e. once it has sent us valid and not-yet-known
+votes and/or block parts for `NumBlocksForVetted` blocks.
+Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
+
+If a peer becomes vetted but there are already too many vetted peers,
+a randomly selected one of the vetted peers becomes unvetted.
+
+If a peer becomes unvetted (either a new peer, or one that was previously vetted),
+a randomly selected one of the unvetted peers is removed from the peer store.
+
+More fine-grained tracking of peer behaviour can be done using
+a trust metric (see below), but it's best to start with something simple.
+
+## Select Peers to Dial
+
+When we need more peers, we pick addresses randomly from the addrbook with some
+configurable bias for unvetted peers. The bias should be lower when we have
+fewer peers and can increase as we obtain more, ensuring that our first peers
+are more trustworthy, but always giving us the chance to discover new good
+peers.
+
+We track the last time we dialed a peer and the number of unsuccessful attempts
+we've made. If too many attempts are made, we mark the peer as bad.
+
+Connection attempts are made with exponential backoff (plus jitter). Because
+the selection process happens every `ensurePeersPeriod`, we might not end up
+dialing a peer for much longer than the backoff duration.
+
+If we fail to connect to a peer after 16 tries (with exponential backoff), we
+remove it from the peer store completely. For persistent peers, however, we
+try to dial them indefinitely, unless `persistent_peers_max_dial_period` is configured to zero.
+
+## Select Peers to Exchange
+
+When we’re asked for peers, we select them as follows:
+
+- select at most `maxGetSelection` peers
+- try to select at least `minGetSelection` peers - if we have fewer than that, select them all.
+- select a random, unbiased `getSelectionPercent` of the peers
+
+Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
+
+## Preventing Spam
+
+There are various cases where we decide a peer has misbehaved and we disconnect from them.
+When this happens, the peer is removed from the peer store and blacklisted for
+some amount of time. We call this "Disconnect and Mark".
+Note that the bad behaviour may be detected outside the PEX reactor itself
+(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
+so it can remove and mark the peer.
+
+In the PEX, if a peer sends us an unsolicited list of peers,
+or if the peer sends a request too soon after another one,
+we Disconnect and MarkBad.
+
+## Trust Metric
+
+The quality of peers can be tracked in more fine-grained detail using a
+Proportional-Integral-Derivative (PID) controller that incorporates
+current, past, and rate-of-change data to inform peer quality.
+
+While a PID trust metric has been implemented, it remains for future work
+to use it in the PEX.
+
+See the [trust metric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
+and [trust metric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
+architecture docs for more details.
diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md
deleted file mode 100644
index 52286e6c7..000000000
--- a/docs/tendermint-core/state-sync.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-order: 11
----
-
-# State Sync
-
-With block sync a node is downloading all of the data of an application from genesis and verifying it.
-With state sync your node will download data related to the head or near the head of the chain and verify the data.
-This leads to drastically shorter times for joining a network.
-
-Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
-
-## Events
-
-When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
-
-The user can query the events by subscribing `EventQueryStateSyncStatus`
-Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
diff --git a/docs/tendermint-core/state-sync/README.md b/docs/tendermint-core/state-sync/README.md
new file mode 100644
index 000000000..39e76ce39
--- /dev/null
+++ b/docs/tendermint-core/state-sync/README.md
@@ -0,0 +1,85 @@
+---
+order: 1
+parent:
+  title: State Sync
+  order: 4
+---
+
+State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching,
+and restoring state machine snapshots. For more information, see the [state sync ABCI section](https://docs.tendermint.com/master/spec/abci/abci.html#state-sync).
+
+The state sync reactor has two main responsibilities:
+
+* Serving state machine snapshots taken by the local ABCI application to new nodes joining the
+  network.
+
+* Discovering existing snapshots and fetching snapshot chunks for an empty local application
+  being bootstrapped.
+
+The state sync process for bootstrapping a new node is described in detail in the section linked
+above. While technically part of the reactor (see `statesync/syncer.go` and related components),
+this document will only cover the P2P reactor component.
+
+For details on the ABCI methods and data types, see the [ABCI documentation](https://docs.tendermint.com/master/spec/abci/).
+
+Information on how to configure state sync is located in the [nodes section](../../nodes/state-sync.md).
+
+## State Sync P2P Protocol
+
+When a new node begins state syncing, it will ask all peers it encounters whether they have any
+available snapshots:
+
+```go
+type snapshotsRequestMessage struct{}
+```
+
+The receiver will query the local ABCI application via `ListSnapshots`, and send a message
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots:
+
+```go
+type snapshotsResponseMessage struct {
+	Height   uint64
+	Format   uint32
+	Chunks   uint32
+	Hash     []byte
+	Metadata []byte
+}
+```
+
+The node running state sync will offer these snapshots to the local ABCI application via
+`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
+is accepted, the state syncer will request snapshot chunks from appropriate peers:
+
+```go
+type chunkRequestMessage struct {
+	Height uint64
+	Format uint32
+	Index  uint32
+}
+```
+
+The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
+and respond with it (limited to 16 MB):
+
+```go
+type chunkResponseMessage struct {
+	Height  uint64
+	Format  uint32
+	Index   uint32
+	Chunk   []byte
+	Missing bool
+}
+```
+
+Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
+chunk is a valid (although unlikely) response.
+
+The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
+is restored. If a chunk response is not returned within some time, it will be re-requested,
+possibly from a different peer.
+
+The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
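+
+As an illustration, serving a `chunkRequestMessage` amounts to a lookup through
+the ABCI `LoadSnapshotChunk` call. The `ABCIClient` interface below is a
+hypothetical stand-in, and the real reactor additionally enforces the 16 MB
+response limit:
+
+```go
+type ABCIClient interface {
+	LoadSnapshotChunk(height uint64, format, index uint32) []byte
+}
+
+func handleChunkRequest(app ABCIClient, req chunkRequestMessage) chunkResponseMessage {
+	chunk := app.LoadSnapshotChunk(req.Height, req.Format, req.Index)
+	return chunkResponseMessage{
+		Height: req.Height,
+		Format: req.Format,
+		Index:  req.Index,
+		Chunk:  chunk,
+		// A nil chunk means "not found"; an empty chunk is a valid response.
+		Missing: chunk == nil,
+	}
+}
+```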
+
+If no state sync is in progress (i.e. during normal operation), any unsolicited response messages
+are discarded.
diff --git a/docs/tools/README.md b/docs/tools/README.md
index 3e87a2ea1..5d778f470 100644
--- a/docs/tools/README.md
+++ b/docs/tools/README.md
@@ -27,3 +27,11 @@ testing Tendermint networks.
 
 This repository contains various different configurations of test networks
 for, and relating to, Tendermint.
+
+Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your
+local machine.
+
+Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
+testnets to the cloud.
+
+See the `tendermint testnet --help` command for more help initializing testnets.
diff --git a/docs/tools/debugging/pro.md b/docs/tools/debugging/pro.md
index b43ed5cba..a248aa130 100644
--- a/docs/tools/debugging/pro.md
+++ b/docs/tools/debugging/pro.md
@@ -99,7 +99,7 @@ We’re hoping that these Tendermint tools will become de facto the first respon
 
 Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
 
-Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements.
+Join our [discord chat](https://discord.gg/cosmosnetwork), where we discuss the current issues and future improvements.
 
 —
diff --git a/docs/networks/docker-compose.md b/docs/tools/docker-compose.md
similarity index 100%
rename from docs/networks/docker-compose.md
rename to docs/tools/docker-compose.md
diff --git a/docs/tools/remote-signer-validation.md b/docs/tools/remote-signer-validation.md
index 80a6a64bc..ab5500860 100644
--- a/docs/tools/remote-signer-validation.md
+++ b/docs/tools/remote-signer-validation.md
@@ -1,4 +1,4 @@
-# tm-signer-harness
+# Remote Signer
 
 Located under the `tools/tm-signer-harness` folder in the [Tendermint
 repository](https://github.com/tendermint/tendermint).
diff --git a/docs/networks/terraform-and-ansible.md b/docs/tools/terraform-and-ansible.md
similarity index 100%
rename from docs/networks/terraform-and-ansible.md
rename to docs/tools/terraform-and-ansible.md
diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md
index e94fe171e..d31b8d71e 100644
--- a/docs/tutorials/go-built-in.md
+++ b/docs/tutorials/go-built-in.md
@@ -23,6 +23,8 @@ yourself with the syntax.
 By following along with this guide, you'll create a Tendermint Core project
 called kvstore, a (very) simple distributed BFT key-value store.
 
+> Note: please use a released version of Tendermint with this guide. The guide will work with the latest released version. Please do not use master.
+
 ## Built-in app vs external app
 
 Running your application inside the same process as Tendermint Core will give
@@ -50,10 +52,13 @@ We'll start by creating a new Go project.
 
 ```bash
 mkdir kvstore
 cd kvstore
+go mod init github.com/<github_username>/<repo_name>
 ```
 
 Inside the example directory create a `main.go` file with the following content:
 
+> Note: there is no need to clone or fork Tendermint in this tutorial.
+ ```go package main @@ -430,7 +435,7 @@ func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { config, pv, nodeKey, - proxy.NewLocalClientCreator(app), + abcicli.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), @@ -482,7 +487,7 @@ node, err := nm.NewNode( config, pv, nodeKey, - proxy.NewLocalClientCreator(app), + abcicli.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), @@ -495,7 +500,7 @@ if err != nil { `NewNode` requires a few things including a configuration file, a private validator, a node key and a few others in order to construct the full node. -Note we use `proxy.NewLocalClientCreator` here to create a local client instead +Note we use `abcicli.NewLocalClientCreator` here to create a local client instead of one communicating through a socket or gRPC. [viper](https://github.com/spf13/viper) is being used for reading the config, diff --git a/docs/tutorials/readme.md b/docs/tutorials/readme.md index a60fba349..0216df800 100644 --- a/docs/tutorials/readme.md +++ b/docs/tutorials/readme.md @@ -4,4 +4,4 @@ parent: order: 2 --- -# Guides +# Tutorials diff --git a/docs/versions b/docs/versions index 7ae4a265a..7c63b5056 100644 --- a/docs/versions +++ b/docs/versions @@ -1,4 +1,5 @@ +master master v0.32.x v0.32 v0.33.x v0.33 v0.34.x v0.34 -master master +v0.35.x v0.35 diff --git a/go.mod b/go.mod index 0c90a7144..2725c3df9 100644 --- a/go.mod +++ b/go.mod @@ -4,41 +4,42 @@ go 1.16 require ( github.com/BurntSushi/toml v0.4.1 - github.com/Workiva/go-datastructures v1.0.53 - github.com/adlio/schema v1.1.13 + github.com/adlio/schema v1.1.14 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fortytw2/leaktest v1.3.0 - github.com/go-kit/kit v0.11.0 + github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.42.1 + github.com/golangci/golangci-lint v1.43.0 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.3 + github.com/lib/pq v1.10.4 github.com/libp2p/go-buffer-pool v0.0.2 - github.com/minio/highwayhash v1.0.2 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible github.com/prometheus/client_golang v1.11.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.8.0 - github.com/rs/zerolog v1.25.0 + github.com/rs/zerolog v1.26.0 github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.8.1 + github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 - github.com/tendermint/tm-db v0.6.4 - github.com/vektra/mockery/v2 v2.9.3 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a - golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 + github.com/tendermint/tm-db v0.6.6 + 
github.com/vektra/mockery/v2 v2.9.4 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.42.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index c00fc24ea..9b411330e 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -22,6 +23,11 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -31,6 +37,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -44,18 +51,23 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY= -github.com/Antonboom/errname v0.1.4/go.mod 
h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg= +github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= +github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74= +github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -63,8 +75,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -76,12 +88,11 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= 
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= -github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98= -github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE= +github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU= +github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -89,13 +100,15 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= @@ -105,21 +118,27 @@ github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9D github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.68/go.mod 
h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
-github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM=
-github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
+github.com/blizzy78/varnamelen v0.3.0 h1:80mYO7Y5ppeEefg1Jzu+NBg16iwToOQVnDnNIoWSShs=
+github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=
github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/breml/bidichk v0.1.1 h1:Qpy8Rmgos9qdJxhka0K7ADEE5bQZX9PQUthkgggHpFM=
+github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo=
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
@@ -134,31 +153,43 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
-github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
+github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0=
-github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI=
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
-github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY=
+github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -176,6 +207,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294=
github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -212,10 +244,11 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68=
-github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/esimonov/ifshort v1.0.3 h1:JD6x035opqGec5fZ0TLjXeROD2p5H7oLGn8MKfy9HTM=
+github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
@@ -227,37 +260,44 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+ne
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc=
github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
-github.com/go-critic/go-critic v0.5.6 h1:siUR1+322iVikWXoV75I1YRfNaC/yaLzhdF9Zwd8Tus=
-github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo=
+github.com/go-critic/go-critic v0.6.1 h1:lS8B9LH/VVsvQQP7Ao5TJyQqteVKVs3E4dXiHMyubtI=
+github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI=
-github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw=
+github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
+github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
@@ -269,8 +309,9 @@ github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=
github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc=
+github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
@@ -297,12 +338,15 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -311,6 +355,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -331,8 +376,9 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@@ -341,16 +387,16 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.42.1 h1:nC4WyrbdnNdohDVUoNKjy/4N4FTM1gCFaVeXecy6vzM=
-github.com/golangci/golangci-lint v1.42.1/go.mod h1:MuInrVlgg2jq4do6XI1jbkErbVHVbwdrLLtGv6p2wPI=
+github.com/golangci/golangci-lint v1.43.0 h1:SLwZFEmDgopqZpfP495zCtV9REUf551JJlJ51Ql7NZA=
+github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc=
-github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
+github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=
+github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -368,12 +414,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -388,19 +436,21 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI=
@@ -417,15 +467,20 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
-github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8=
github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
+github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
-github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc=
github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
+github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 h1:rx8127mFPqXXsfPSo8BwnIU97MKFZc89WHAHt8PwDVY=
github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
+github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
+github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
+github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -441,25 +496,32 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -476,7 +538,7 @@ github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -489,8 +551,8 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS
github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
-github.com/jingyugao/rowserrcheck v1.1.0 h1:u6h4eiNuCLqk73Ic5TXQq9yZS+uEXTdusn7c3w1Mr6A=
-github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA=
+github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
+github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -501,21 +563,24 @@ github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI=
github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -525,9 +590,9 @@ github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -535,14 +600,15 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM=
github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
-github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU=
-github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
+github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=
+github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
@@ -553,15 +619,15 @@ github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg=
github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
+github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
@@ -570,19 +636,24 @@ github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKo
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
+github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -594,17 +665,17 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM=
-github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
-github.com/mgechev/revive v1.1.1 h1:mkXNHP14Y6tfq+ocnQaiKEtgJDM41yaoyQq4qn6TD/4=
-github.com/mgechev/revive v1.1.1/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY=
+github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0=
+github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
+github.com/mgechev/revive v1.1.2 h1:MiYA/o9M7REjvOF20QN43U8OtXDDHQFKLCtJnxLGLog=
+github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@@ -618,14 +689,17 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
@@ -633,16 +707,17 @@ github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinK
github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
github.com/mroth/weightedrand v0.4.1 h1:rHcbUBopmi/3x4nnrvwGJBhX9d0vk+KgoLUZeDP6YyI=
github.com/mroth/weightedrand v0.4.1/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
-github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw=
-github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
+github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
+github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
-github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
-github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI=
-github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
+github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g=
+github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
@@ -678,32 +753,42 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
+github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
-github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -721,6 +806,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
@@ -732,26 +818,30 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
-github.com/quasilyte/go-ruleguard v0.3.4 h1:F6l5p6+7WBcTKS7foNQ4wqA39zjn2+RbdbyzGxIq1B0=
-github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA=
+github.com/quasilyte/go-ruleguard v0.3.13 h1:O1G41cq1jUr3cJmqp7vOUT0SokqjzmS9aESWJuIDRaY=
+github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ=
github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
-github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
-github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -767,8 +857,8 @@ github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
-github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II=
-github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI=
+github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE=
+github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=
@@ -776,30 +866,31 @@ github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoL
github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8=
-github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/securego/gosec/v2 v2.9.1 h1:anHKLS/ApTYU6NZkKa/5cQqqcbKZURjvc+MtR++S4EQ=
+github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
-github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4=
+github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8=
+github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4=
@@ -816,8 +907,9 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
@@ -833,11 +925,11 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA=
-github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
+github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
+github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -855,30 +947,37 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
+github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
-github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
-github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw=
-github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0=
-github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM=
+github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI=
+github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
+github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
+github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
+github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
-github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4=
-github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E=
+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4=
-github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU=
+github.com/tomarrell/wrapcheck/v2 v2.4.0 h1:mU4H9KsqqPZUALOUbVOpjy8qNQbWLoLI9fV68/1tq30=
+github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE=
github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -892,12 +991,14 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
-github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vektra/mockery/v2 v2.9.3 h1:ma6hcGQw4q/lhFUTJ+E9V8/5tsIcht9i2Q4d1qo26SQ=
-github.com/vektra/mockery/v2 v2.9.3/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90=
+github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
+github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
+github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
+github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g=
+github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90=
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -911,12 +1012,13 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
@@ -935,14 +1037,18 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -960,9 +1066,16 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -973,6 +1086,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -997,8 +1111,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1035,7 +1150,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1044,12 +1158,20 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod 
h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1062,6 +1184,10 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1092,6 +1218,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1105,8 +1232,10 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1125,12 +1254,13 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1139,16 +1269,31 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1159,19 +1304,22 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1246,7 +1394,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1261,16 +1408,25 @@ golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools 
v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1294,6 +1450,12 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1348,8 +1510,20 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1374,9 +1548,15 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1406,8 +1586,9 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= 
+gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1448,6 +1629,7 @@ mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/internal/blocksync/doc.go b/internal/blocksync/doc.go index 3111130e4..5f84b1261 100644 --- a/internal/blocksync/doc.go +++ b/internal/blocksync/doc.go @@ -13,14 +13,9 @@ will no longer blocksync and thus no longer run the blocksync process. Note, the blocksync reactor Service gossips entire block and relevant data such that each receiving peer may construct the entire view of the blocksync state. -There are currently two versions of the blocksync reactor Service: - -- v0: The initial implementation that is battle-tested, but whose test coverage - is lacking and is not formally verifiable. -- v2: The latest implementation that has much higher test coverage and is formally - verified. However, the current implementation of v2 is not as battle-tested and - is known to have various bugs that could make it unreliable in production - environments. +There is currently only one version of the blocksync reactor Service. +It is battle-tested, but its test coverage is lacking and it has not +been formally verified. The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This channel is responsible for handling messages that both request blocks and respond diff --git a/internal/blocksync/v0/pool.go b/internal/blocksync/pool.go similarity index 95% rename from internal/blocksync/v0/pool.go rename to internal/blocksync/pool.go index b3704f333..6f06c9883 100644 --- a/internal/blocksync/v0/pool.go +++ b/internal/blocksync/pool.go @@ -1,13 +1,14 @@ -package v0 +package blocksync import ( + "context" "errors" "fmt" "math" "sync/atomic" "time" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -91,7 +92,13 @@ type BlockPool struct { // NewBlockPool returns a new BlockPool with the height equal to start. Block // requests and errors will be sent to requestsCh and errorsCh accordingly.
-func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { +func NewBlockPool( + logger log.Logger, + start int64, + requestsCh chan<- BlockRequest, + errorsCh chan<- peerError, +) *BlockPool { + bp := &BlockPool{ peers: make(map[types.NodeID]*bpPeer), @@ -104,21 +111,21 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p errorsCh: errorsCh, lastSyncRate: 0, } - bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) + bp.BaseService = *service.NewBaseService(logger, "BlockPool", bp) return bp } // OnStart implements service.Service by spawning requesters routine and recording // pool's start time. -func (pool *BlockPool) OnStart() error { +func (pool *BlockPool) OnStart(ctx context.Context) error { pool.lastAdvance = time.Now() pool.lastHundredBlockTimeStamp = pool.lastAdvance - go pool.makeRequestersRoutine() + go pool.makeRequestersRoutine(ctx) return nil } // spawns requesters as needed -func (pool *BlockPool) makeRequestersRoutine() { +func (pool *BlockPool) makeRequestersRoutine(ctx context.Context) { for { if !pool.IsRunning() { break @@ -138,7 +145,7 @@ func (pool *BlockPool) makeRequestersRoutine() { pool.removeTimedoutPeers() default: // request for more blocks. - pool.makeNextRequester() + pool.makeNextRequester(ctx) } } } @@ -391,7 +398,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { return nil } -func (pool *BlockPool) makeNextRequester() { +func (pool *BlockPool) makeNextRequester(ctx context.Context) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -405,7 +412,7 @@ func (pool *BlockPool) makeNextRequester() { pool.requesters[nextHeight] = request atomic.AddInt32(&pool.numPending, 1) - err := request.Start() + err := request.Start(ctx) if err != nil { request.Logger.Error("Error starting request", "err", err) } @@ -471,7 +478,7 @@ type bpPeer struct { base int64 pool *BlockPool id types.NodeID - recvMonitor *flow.Monitor + recvMonitor *flowrate.Monitor timeout *time.Timer @@ -495,7 +502,7 @@ func (peer *bpPeer) setLogger(l log.Logger) { } func (peer *bpPeer) resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) + peer.recvMonitor = flowrate.New(time.Second, time.Second*40) initialValue := float64(minRecvRate) * math.E peer.recvMonitor.SetREMA(initialValue) } @@ -564,7 +571,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester { return bpr } -func (bpr *bpRequester) OnStart() error { +func (bpr *bpRequester) OnStart(ctx context.Context) error { go bpr.requestRoutine() return nil } diff --git a/internal/blocksync/v0/pool_test.go b/internal/blocksync/pool_test.go similarity index 87% rename from internal/blocksync/v0/pool_test.go rename to internal/blocksync/pool_test.go index 67617d2b7..0718fee16 100644 --- a/internal/blocksync/v0/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,6 +1,7 @@ -package v0 +package blocksync import ( + "context" "fmt" mrand "math/rand" "testing" @@ -78,23 +79,20 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { } func TestBlockPoolBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) - err := pool.Start() - if err != nil { + if err := 
pool.Start(ctx); err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) peers.start() defer peers.stop() @@ -138,21 +136,19 @@ func TestBlockPoolBasic(t *testing.T) { } func TestBlockPoolTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) + err := pool.Start(ctx) if err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) for _, peer := range peers { t.Logf("Peer %v", peer.id) @@ -201,6 +197,9 @@ func TestBlockPoolTimeout(t *testing.T) { } func TestBlockPoolRemovePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peers := make(testPeers, 10) for i := 0; i < 10; i++ { peerID := types.NodeID(fmt.Sprintf("%d", i+1)) @@ -210,15 +209,10 @@ func TestBlockPoolRemovePeer(t *testing.T) { requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) - pool := NewBlockPool(1, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(log.TestingLogger(), 1, requestsCh, errorsCh) + err := pool.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) // add peers for peerID, peer := range peers { diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/reactor.go similarity index 91% rename from internal/blocksync/v0/reactor.go rename to internal/blocksync/reactor.go index 4ddfa4edc..a6845b719 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,47 +1,24 @@ -package v0 +package blocksync import ( + "context" "fmt" "runtime/debug" "sync" "time" - bc "github.com/tendermint/tendermint/internal/blocksync" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmSync "github.com/tendermint/tendermint/libs/sync" + tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. 
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - BlockSyncChannel: { - MsgType: new(bcproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(BlockSyncChannel), - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, - MaxSendBytes: 100, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( // BlockSyncChannel is a channel for blocks and status updates @@ -59,10 +36,21 @@ const ( syncTimeout = 60 * time.Second ) +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: BlockSyncChannel, + MessageType: new(bcproto.Message), + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 1024, + RecvMessageCapacity: MaxMsgSize, + } +} + type consensusReactor interface { // For when we switch from block sync reactor to the consensus // machine. - SwitchToConsensus(state sm.State, skipWAL bool) + SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) } type peerError struct { @@ -85,7 +73,7 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmSync.AtomicBool + blockSync *tmsync.AtomicBool blockSyncCh *p2p.Channel // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope @@ -107,7 +95,7 @@ type Reactor struct { // stopping the p2p Channel(s). poolWG sync.WaitGroup - metrics *cons.Metrics + metrics *consensus.Metrics syncStartTime time.Time } @@ -122,7 +110,7 @@ func NewReactor( blockSyncCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, blockSync bool, - metrics *cons.Metrics, + metrics *consensus.Metrics, ) (*Reactor, error) { if state.LastBlockHeight != store.Height() { return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) @@ -140,9 +128,9 @@ func NewReactor( initialState: state, blockExec: blockExec, store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), + pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh), consReactor: consReactor, - blockSync: tmSync.NewBool(blockSync), + blockSync: tmsync.NewBool(blockSync), requestsCh: requestsCh, errorsCh: errorsCh, blockSyncCh: blockSyncCh, @@ -164,11 +152,13 @@ func NewReactor( // // If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if r.blockSync.IsSet() { - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } + r.poolWG.Add(1) + go r.requestRoutine() r.poolWG.Add(1) go r.poolRoutine(false) @@ -373,17 +363,20 @@ func (r *Reactor) processPeerUpdates() { // SwitchToBlockSync is called by the state sync reactor when switching to fast // sync. 
-func (r *Reactor) SwitchToBlockSync(state sm.State) error { +func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { r.blockSync.Set() r.initialState = state r.pool.height = state.LastBlockHeight + 1 - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } r.syncStartTime = time.Now() + r.poolWG.Add(1) + go r.requestRoutine() + r.poolWG.Add(1) go r.poolRoutine(true) @@ -394,7 +387,6 @@ func (r *Reactor) requestRoutine() { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() - r.poolWG.Add(1) defer r.poolWG.Done() for { @@ -432,6 +424,17 @@ func (r *Reactor) requestRoutine() { } } +func (r *Reactor) stopCtx() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-r.closeCh + cancel() + }() + + return ctx +} + // poolRoutine handles messages from the poolReactor telling the reactor what to // do. // @@ -450,13 +453,12 @@ func (r *Reactor) poolRoutine(stateSynced bool) { lastRate = 0.0 didProcessCh = make(chan struct{}, 1) + ctx = r.stopCtx() ) defer trySyncTicker.Stop() defer switchToConsensusTicker.Stop() - go r.requestRoutine() - defer r.poolWG.Done() FOR_LOOP: @@ -499,7 +501,7 @@ FOR_LOOP: r.blockSync.UnSet() if r.consReactor != nil { - r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) + r.consReactor.SwitchToConsensus(ctx, state, blocksSynced > 0 || stateSynced) } break FOR_LOOP @@ -605,6 +607,8 @@ FOR_LOOP: case <-r.closeCh: break FOR_LOOP + case <-r.pool.Quit(): + break FOR_LOOP } } } diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/reactor_test.go similarity index 75% rename from internal/blocksync/v0/reactor_test.go rename to internal/blocksync/reactor_test.go index 4ed1ce478..3bf09b629 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,27 +1,29 @@ -package v0 +package blocksync import ( + "context" "os" "testing" "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) type reactorTestSuite struct { @@ -40,6 +42,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -48,13 +51,16 @@ func setup( ) *reactorTestSuite { t.Helper() + var cancel context.CancelFunc + ctx, cancel = 
context.WithCancel(ctx) + numNodes := len(maxBlockHeights) require.True(t, numNodes >= 1, "must specify at least one block height (nodes)") rts := &reactorTestSuite{ logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]proxy.AppConns, numNodes), @@ -64,22 +70,24 @@ func setup( blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: byte(BlockSyncChannel)} - rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) i := 0 for nodeID := range rts.network.Nodes { - rts.addNode(t, nodeID, genDoc, privVal, maxBlockHeights[i]) + rts.addNode(ctx, t, nodeID, genDoc, privVal, maxBlockHeights[i]) i++ } t.Cleanup(func() { + cancel() for _, nodeID := range rts.nodes { rts.peerUpdates[nodeID].Close() if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.NoError(t, rts.app[nodeID].Stop()) + rts.reactors[nodeID].Wait() + rts.app[nodeID].Wait() + require.False(t, rts.reactors[nodeID].IsRunning()) } } @@ -88,7 +96,9 @@ func setup( return rts } -func (rts *reactorTestSuite) addNode(t *testing.T, +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -96,9 +106,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) { t.Helper() + logger := log.TestingLogger() + rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{})) - require.NoError(t, rts.app[nodeID].Start()) + rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, rts.app[nodeID].Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() @@ -164,10 +176,10 @@ func (rts *reactorTestSuite) addNode(t *testing.T, rts.blockSyncChannels[nodeID], rts.peerUpdates[nodeID], rts.blockSync, - cons.NopMetrics()) + consensus.NopMetrics()) require.NoError(t, err) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } @@ -181,14 +193,18 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(64) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -217,14 +233,18 @@ func TestReactor_AbruptDisconnect(t *testing.T) { } func 
TestReactor_SyncTime(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(101) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) rts.start(t) @@ -241,14 +261,19 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(65) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -289,14 +314,18 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) maxBlockHeight := int64(48) valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -329,12 +358,12 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // XXX: This causes a potential race condition. 
// See: https://github.com/tendermint/tendermint/issues/6005 valSet, otherPrivVals := factory.ValidatorSet(1, 30) - otherGenDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) - newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{ + otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) + newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), }) - rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) + rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) // add a fake peer just so we do not wait for the consensus ticker to timeout rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10) diff --git a/internal/blocksync/v2/internal/behavior/doc.go b/internal/blocksync/v2/internal/behavior/doc.go deleted file mode 100644 index c4bd06cce..000000000 --- a/internal/blocksync/v2/internal/behavior/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Package Behavior provides a mechanism for reactors to report behavior of peers. - -Instead of a reactor calling the switch directly it will call the behavior module which will -handle the stoping and marking peer as good on behalf of the reactor. - -There are four different behaviors a reactor can report. - -1. bad message - -type badMessage struct { - explanation string -} - -This message will request the peer be stopped for an error - -2. message out of order - -type messageOutOfOrder struct { - explanation string -} - -This message will request the peer be stopped for an error - -3. consesnsus Vote - -type consensusVote struct { - explanation string -} - -This message will request the peer be marked as good - -4. block part - -type blockPart struct { - explanation string -} - -This message will request the peer be marked as good - -*/ -package behavior diff --git a/internal/blocksync/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go deleted file mode 100644 index 90948d888..000000000 --- a/internal/blocksync/v2/internal/behavior/peer_behaviour.go +++ /dev/null @@ -1,47 +0,0 @@ -package behavior - -import "github.com/tendermint/tendermint/types" - -// PeerBehavior is a struct describing a behavior a peer performed. -// `peerID` identifies the peer and reason characterizes the specific -// behavior performed by the peer. -type PeerBehavior struct { - peerID types.NodeID - reason interface{} -} - -type badMessage struct { - explanation string -} - -// BadMessage returns a badMessage PeerBehavior. -func BadMessage(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: badMessage{explanation}} -} - -type messageOutOfOrder struct { - explanation string -} - -// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior. -func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}} -} - -type consensusVote struct { - explanation string -} - -// ConsensusVote returns a consensusVote PeerBehavior. -func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}} -} - -type blockPart struct { - explanation string -} - -// BlockPart returns blockPart PeerBehavior. 
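
Editor's note: the behavior package deleted above exposed four constructors and a Reporter interface (defined just below). A minimal sketch of how a reactor fed behaviors into it, hypothetical and compiling only against the pre-removal tree, since this diff deletes the package:

package example

import (
	bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	"github.com/tendermint/tendermint/types"
)

// reportOutcome is hypothetical; it shows the intended split: bad or
// out-of-order messages stop the peer, votes and block parts mark it good.
func reportOutcome(r bh.Reporter, peerID types.NodeID, decodeErr error) error {
	if decodeErr != nil {
		return r.Report(bh.BadMessage(peerID, decodeErr.Error()))
	}
	return r.Report(bh.ConsensusVote(peerID, "delivered a useful vote"))
}
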
-func BlockPart(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: blockPart{explanation}} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go deleted file mode 100644 index c150a98d5..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter.go +++ /dev/null @@ -1,87 +0,0 @@ -package behavior - -import ( - "errors" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// Reporter provides an interface for reactors to report the behavior -// of peers synchronously to other components. -type Reporter interface { - Report(behavior PeerBehavior) error -} - -// SwitchReporter reports peer behavior to an internal Switch. -type SwitchReporter struct { - sw *p2p.Switch -} - -// NewSwitchReporter return a new SwitchReporter instance which wraps the Switch. -func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter { - return &SwitchReporter{ - sw: sw, - } -} - -// Report reports the behavior of a peer to the Switch. -func (spbr *SwitchReporter) Report(behavior PeerBehavior) error { - peer := spbr.sw.Peers().Get(behavior.peerID) - if peer == nil { - return errors.New("peer not found") - } - - switch reason := behavior.reason.(type) { - case consensusVote, blockPart: - spbr.sw.MarkPeerAsGood(peer) - case badMessage: - spbr.sw.StopPeerForError(peer, reason.explanation) - case messageOutOfOrder: - spbr.sw.StopPeerForError(peer, reason.explanation) - default: - return errors.New("unknown reason reported") - } - - return nil -} - -// MockReporter is a concrete implementation of the Reporter -// interface used in reactor tests to ensure reactors report the correct -// behavior in manufactured scenarios. -type MockReporter struct { - mtx tmsync.RWMutex - pb map[types.NodeID][]PeerBehavior -} - -// NewMockReporter returns a Reporter which records all reported -// behaviors in memory. -func NewMockReporter() *MockReporter { - return &MockReporter{ - pb: map[types.NodeID][]PeerBehavior{}, - } -} - -// Report stores the PeerBehavior produced by the peer identified by peerID. -func (mpbr *MockReporter) Report(behavior PeerBehavior) error { - mpbr.mtx.Lock() - defer mpbr.mtx.Unlock() - mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior) - - return nil -} - -// GetBehaviors returns all behaviors reported on the peer identified by peerID. -func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior { - mpbr.mtx.RLock() - defer mpbr.mtx.RUnlock() - if items, ok := mpbr.pb[peerID]; ok { - result := make([]PeerBehavior, len(items)) - copy(result, items) - - return result - } - - return []PeerBehavior{} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go deleted file mode 100644 index 861a63df0..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package behavior_test - -import ( - "sync" - "testing" - - bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/types" -) - -// TestMockReporter tests the MockReporter's ability to store reported -// peer behavior in memory indexed by the peerID. 
-func TestMockReporter(t *testing.T) { - var peerID types.NodeID = "MockPeer" - pr := bh.NewMockReporter() - - behaviors := pr.GetBehaviors(peerID) - if len(behaviors) != 0 { - t.Error("Expected to have no behaviors reported") - } - - badMessage := bh.BadMessage(peerID, "bad message") - if err := pr.Report(badMessage); err != nil { - t.Error(err) - } - behaviors = pr.GetBehaviors(peerID) - if len(behaviors) != 1 { - t.Error("Expected the peer have one reported behavior") - } - - if behaviors[0] != badMessage { - t.Error("Expected Bad Message to have been reported") - } -} - -type scriptItem struct { - peerID types.NodeID - behavior bh.PeerBehavior -} - -// equalBehaviors returns true if a and b contain the same PeerBehaviors with -// the same freequencies and otherwise false. -func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool { - aHistogram := map[bh.PeerBehavior]int{} - bHistogram := map[bh.PeerBehavior]int{} - - for _, behavior := range a { - aHistogram[behavior]++ - } - - for _, behavior := range b { - bHistogram[behavior]++ - } - - if len(aHistogram) != len(bHistogram) { - return false - } - - for _, behavior := range a { - if aHistogram[behavior] != bHistogram[behavior] { - return false - } - } - - for _, behavior := range b { - if bHistogram[behavior] != aHistogram[behavior] { - return false - } - } - - return true -} - -// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices -// of peer behaviors can be compared for the behaviors they contain and the -// freequencies that those behaviors occur. -func TestEqualPeerBehaviors(t *testing.T) { - var ( - peerID types.NodeID = "MockPeer" - consensusVote = bh.ConsensusVote(peerID, "voted") - blockPart = bh.BlockPart(peerID, "blocked") - equals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{}}, - // Single behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}}, - // Equal Frequencies - {[]bh.PeerBehavior{consensusVote, consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - // Equal frequencies different orders - {[]bh.PeerBehavior{consensusVote, blockPart}, - []bh.PeerBehavior{blockPart, consensusVote}}, - } - unequals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Comparing empty sets to non empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}}, - // Different behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}}, - // Same behavior with different frequencies - {[]bh.PeerBehavior{consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - } - ) - - for _, test := range equals { - if !equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be equal", test.left, test.right) - } - } - - for _, test := range unequals { - if equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be unequal", test.left, test.right) - } - } -} - -// TestPeerBehaviorConcurrency constructs a scenario in which -// multiple goroutines are using the same MockReporter instance. -// This test reproduces the conditions in which MockReporter will -// be used within a Reactor `Receive` method tests to ensure thread safety. 
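
Editor's note: equalBehaviors above compares two slices as multisets (same elements, same frequencies, any order). A standalone generic restatement of the same idea, assuming Go 1.18+ generics, which the deleted pre-generics code could not use:

package example

// equalMultiset reports whether a and b contain the same elements with
// the same frequencies, regardless of order.
func equalMultiset[T comparable](a, b []T) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[T]int, len(a))
	for _, x := range a {
		counts[x]++
	}
	for _, y := range b {
		if counts[y] == 0 {
			return false
		}
		counts[y]-- // consume one occurrence
	}
	return true
}

The length check plus decrement-and-consume replaces the deleted helper's two histograms and double comparison loop while keeping the same semantics.
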
-func TestMockPeerBehaviorReporterConcurrency(t *testing.T) { - var ( - behaviorScript = []struct { - peerID types.NodeID - behaviors []bh.PeerBehavior - }{ - {"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}}, - {"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}}, - { - "3", - []bh.PeerBehavior{bh.BlockPart("3", ""), - bh.ConsensusVote("3", ""), - bh.BlockPart("3", ""), - bh.ConsensusVote("3", "")}}, - { - "4", - []bh.PeerBehavior{bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", "")}}, - { - "5", - []bh.PeerBehavior{bh.BlockPart("5", ""), - bh.ConsensusVote("5", ""), - bh.BlockPart("5", ""), - bh.ConsensusVote("5", "")}}, - } - ) - - var receiveWg sync.WaitGroup - pr := bh.NewMockReporter() - scriptItems := make(chan scriptItem) - done := make(chan int) - numConsumers := 3 - for i := 0; i < numConsumers; i++ { - receiveWg.Add(1) - go func() { - defer receiveWg.Done() - for { - select { - case pb := <-scriptItems: - if err := pr.Report(pb.behavior); err != nil { - t.Error(err) - } - case <-done: - return - } - } - }() - } - - var sendingWg sync.WaitGroup - sendingWg.Add(1) - go func() { - defer sendingWg.Done() - for _, item := range behaviorScript { - for _, reason := range item.behaviors { - scriptItems <- scriptItem{item.peerID, reason} - } - } - }() - - sendingWg.Wait() - - for i := 0; i < numConsumers; i++ { - done <- 1 - } - - receiveWg.Wait() - - for _, items := range behaviorScript { - reported := pr.GetBehaviors(items.peerID) - if !equalBehaviors(reported, items.behaviors) { - t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n", - items.peerID, items.behaviors, reported) - } - } -} diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go deleted file mode 100644 index 743428516..000000000 --- a/internal/blocksync/v2/io.go +++ /dev/null @@ -1,187 +0,0 @@ -package v2 - -import ( - "errors" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/internal/p2p" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -var ( - errPeerQueueFull = errors.New("peer queue full") -) - -type iIO interface { - sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error - sendBlockNotFound(height int64, peer p2p.Peer) error - sendStatusResponse(base, height int64, peer p2p.Peer) error - - sendStatusRequest(peer p2p.Peer) error - broadcastStatusRequest() error - - trySwitchToConsensus(state state.State, skipWAL bool) bool -} - -type switchIO struct { - sw *p2p.Switch -} - -func newSwitchIo(sw *p2p.Switch) *switchIO { - return &switchIO{ - sw: sw, - } -} - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and block sync to - // the consensus machine - SwitchToConsensus(state state.State, skipWAL bool) -} - -func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - return errPeerQueueFull - } - return nil -} - 
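
Editor's note: sendBlockRequest above and the send helpers deleted just below all repeat the same marshal-then-TrySend shape. A sketch, with hypothetical names, of the shared helper they could have factored out:

package example

import (
	"errors"

	"github.com/gogo/protobuf/proto"
)

var errPeerQueueFull = errors.New("peer queue full")

// peer captures the one method of p2p.Peer this helper needs.
type peer interface {
	TrySend(chID byte, msgBytes []byte) bool
}

// trySendProto marshals any protobuf message and queues it without
// blocking, surfacing a full peer send queue as an error instead of
// stalling the caller.
func trySendProto(p peer, chID byte, msg proto.Message) error {
	bz, err := proto.Marshal(msg)
	if err != nil {
		return err
	}
	if !p.TrySend(chID, bz) {
		return errPeerQueueFull
	}
	return nil
}
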
-func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{ - Height: height, - Base: base, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { - if block == nil { - panic("trying to send nil block") - } - - bpb, err := block.ToProto() - if err != nil { - return err - } - - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{ - Block: bpb, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { - conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, skipWAL) - } - return ok -} - -func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) broadcastStatusRequest() error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - // XXX: maybe we should use an io specific peer list here - sio.sw.Broadcast(BlockchainChannel, msgBytes) - - return nil -} diff --git a/internal/blocksync/v2/metrics.go b/internal/blocksync/v2/metrics.go deleted file mode 100644 index c68ec6447..000000000 --- a/internal/blocksync/v2/metrics.go +++ /dev/null @@ -1,125 +0,0 @@ -package v2 - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. - MetricsSubsystem = "blockchain" -) - -// Metrics contains metrics exposed by this package. 
-type Metrics struct { - // events_in - EventsIn metrics.Counter - // events_in - EventsHandled metrics.Counter - // events_out - EventsOut metrics.Counter - // errors_in - ErrorsIn metrics.Counter - // errors_handled - ErrorsHandled metrics.Counter - // errors_out - ErrorsOut metrics.Counter - // events_shed - EventsShed metrics.Counter - // events_sent - EventsSent metrics.Counter - // errors_sent - ErrorsSent metrics.Counter - // errors_shed - ErrorsShed metrics.Counter -} - -// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines. -// Can we burn in the routine name here? -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_in", - Help: "Events read from the channel.", - }, labels).With(labelsAndValues...), - EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_handled", - Help: "Events handled", - }, labels).With(labelsAndValues...), - EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_out", - Help: "Events output from routine.", - }, labels).With(labelsAndValues...), - ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_in", - Help: "Errors read from the channel.", - }, labels).With(labelsAndValues...), - ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_handled", - Help: "Errors handled.", - }, labels).With(labelsAndValues...), - ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_out", - Help: "Errors output from routine.", - }, labels).With(labelsAndValues...), - ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_sent", - Help: "Errors sent to routine.", - }, labels).With(labelsAndValues...), - ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_shed", - Help: "Errors dropped from sending.", - }, labels).With(labelsAndValues...), - EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_sent", - Help: "Events sent to routine.", - }, labels).With(labelsAndValues...), - EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_shed", - Help: "Events dropped from sending.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
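
Editor's note: PrometheusMetrics above relies on a go-kit convention: the variadic argument alternates label names and values, so the names are the even-indexed entries. A stripped-down sketch of that convention for a single counter:

package example

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func newEventsInCounter(namespace string, labelsAndValues ...string) metrics.Counter {
	labels := make([]string, 0, len(labelsAndValues)/2)
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i]) // even indices are label names
	}
	return prometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: "blockchain",
		Name:      "events_in",
		Help:      "Events read from the channel.",
	}, labels).With(labelsAndValues...)
}
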
-func NopMetrics() *Metrics { - return &Metrics{ - EventsIn: discard.NewCounter(), - EventsHandled: discard.NewCounter(), - EventsOut: discard.NewCounter(), - ErrorsIn: discard.NewCounter(), - ErrorsHandled: discard.NewCounter(), - ErrorsOut: discard.NewCounter(), - EventsShed: discard.NewCounter(), - EventsSent: discard.NewCounter(), - ErrorsSent: discard.NewCounter(), - ErrorsShed: discard.NewCounter(), - } -} diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go deleted file mode 100644 index b448e7d8a..000000000 --- a/internal/blocksync/v2/processor.go +++ /dev/null @@ -1,193 +0,0 @@ -package v2 - -import ( - "fmt" - - tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// Events generated by the processor: -// block execution failure, event will indicate the peer(s) that caused the error -type pcBlockVerificationFailure struct { - priorityNormal - height int64 - firstPeerID types.NodeID - secondPeerID types.NodeID -} - -func (e pcBlockVerificationFailure) String() string { - return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", - e.height, e.firstPeerID, e.secondPeerID) -} - -// successful block execution -type pcBlockProcessed struct { - priorityNormal - height int64 - peerID types.NodeID -} - -func (e pcBlockProcessed) String() string { - return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) -} - -// processor has finished -type pcFinished struct { - priorityNormal - blocksSynced int - tmState tmState.State -} - -func (p pcFinished) Error() string { - return "finished" -} - -type queueItem struct { - block *types.Block - peerID types.NodeID -} - -type blockQueue map[int64]queueItem - -type pcState struct { - // blocks waiting to be processed - queue blockQueue - - // draining indicates that the next rProcessBlock event with a queue miss constitutes completion - draining bool - - // the number of blocks successfully synced by the processor - blocksSynced int - - // the processorContext which contains the processor dependencies - context processorContext -} - -func (state *pcState) String() string { - return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d", - state.height(), len(state.queue), state.draining, state.blocksSynced) -} - -// newPcState returns a pcState initialized with the last verified block enqueued -func newPcState(context processorContext) *pcState { - return &pcState{ - queue: blockQueue{}, - draining: false, - blocksSynced: 0, - context: context, - } -} - -// nextTwo returns the next two unverified blocks -func (state *pcState) nextTwo() (queueItem, queueItem, error) { - if first, ok := state.queue[state.height()+1]; ok { - if second, ok := state.queue[state.height()+2]; ok { - return first, second, nil - } - } - return queueItem{}, queueItem{}, fmt.Errorf("not found") -} - -// synced returns true when at most the last verified block remains in the queue -func (state *pcState) synced() bool { - return len(state.queue) <= 1 -} - -func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) { - if item, ok := state.queue[height]; ok { - panic(fmt.Sprintf( - "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", - height, block.Hash(), peerID, item.block.Hash(), item.peerID)) - } - - state.queue[height] = queueItem{block: block, peerID: peerID} -} - -func (state *pcState) height() int64 { - return state.context.tmState().LastBlockHeight -} - -// purgePeer moves all 
unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID types.NodeID) { - // what if height is less than state.height? - for height, item := range state.queue { - if item.peerID == peerID { - delete(state.queue, height) - } - } -} - -// handle processes FSM events -func (state *pcState) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - state.context.setState(event.state) - return noOp, nil - - case scFinishedEv: - if state.synced() { - return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil - } - state.draining = true - return noOp, nil - - case scPeerError: - state.purgePeer(event.peerID) - return noOp, nil - - case scBlockReceived: - if event.block == nil { - return noOp, nil - } - - // enqueue block if height is higher than state height, else ignore it - if event.block.Height > state.height() { - state.enqueue(event.peerID, event.block, event.block.Height) - } - return noOp, nil - - case rProcessBlock: - tmState := state.context.tmState() - firstItem, secondItem, err := state.nextTwo() - if err != nil { - if state.draining { - return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil - } - return noOp, nil - } - - var ( - first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} - ) - - // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) - if err != nil { - state.purgePeer(firstItem.peerID) - if firstItem.peerID != secondItem.peerID { - state.purgePeer(secondItem.peerID) - } - return pcBlockVerificationFailure{ - height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, - nil - } - - state.context.saveBlock(first, firstParts, second.LastCommit) - - if err := state.context.applyBlock(firstID, first); err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - - state.context.recordConsMetrics(first) - - delete(state.queue, first.Height) - state.blocksSynced++ - - return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } - - return noOp, nil -} diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go deleted file mode 100644 index bc6852565..000000000 --- a/internal/blocksync/v2/processor_context.go +++ /dev/null @@ -1,112 +0,0 @@ -package v2 - -import ( - "fmt" - - cons "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - tmState() state.State - setState(state.State) - recordConsMetrics(block *types.Block) -} - -type pContext struct { - store blockStore - applier blockApplier - state state.State - metrics *cons.Metrics -} - -func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext { - return &pContext{ - store: st, - applier: ex, - state: s, - metrics: m, - } -} - -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := 
pc.applier.ApplyBlock(pc.state, blockID, block) - pc.state = newState - return err -} - -func (pc pContext) tmState() state.State { - return pc.state -} - -func (pc *pContext) setState(state state.State) { - pc.state = state -} - -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) -} - -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - pc.store.SaveBlock(block, blockParts, seenCommit) -} - -func (pc *pContext) recordConsMetrics(block *types.Block) { - pc.metrics.RecordConsMetrics(block) -} - -type mockPContext struct { - applicationBL []int64 - verificationBL []int64 - state state.State -} - -func newMockProcessorContext( - state state.State, - verificationBlackList []int64, - applicationBlackList []int64) *mockPContext { - return &mockPContext{ - applicationBL: applicationBlackList, - verificationBL: verificationBlackList, - state: state, - } -} - -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { - for _, h := range mpc.applicationBL { - if h == block.Height { - return fmt.Errorf("generic application error") - } - } - mpc.state.LastBlockHeight = block.Height - return nil -} - -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - for _, h := range mpc.verificationBL { - if h == height { - return fmt.Errorf("generic verification error") - } - } - return nil -} - -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - -} - -func (mpc *mockPContext) setState(state state.State) { - mpc.state = state -} - -func (mpc *mockPContext) tmState() state.State { - return mpc.state -} - -func (mpc *mockPContext) recordConsMetrics(block *types.Block) { - -} diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go deleted file mode 100644 index f7d51112b..000000000 --- a/internal/blocksync/v2/processor_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. -type pcBlock struct { - pid string - height int64 -} - -// params is a test structure used to create processor state. -type params struct { - height int64 - items []pcBlock - blocksSynced int - verBL []int64 - appBL []int64 - draining bool -} - -// makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// makeState takes test parameters and creates a specific processor state. 
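
Editor's note: mockPContext above injects failures by height "blacklists": verification or application fails deterministically at exactly the listed heights, which is what lets the FSM tests below drive the error paths. The pattern in isolation, with hypothetical names:

package example

import "fmt"

// faultyApplier fails exactly at the listed heights, letting a test
// steer the state machine into its error branches on demand.
type faultyApplier struct {
	failAt map[int64]bool
}

func (f *faultyApplier) applyBlock(height int64) error {
	if f.failAt[height] {
		return fmt.Errorf("generic application error at height %d", height)
	}
	return nil
}
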
-func makeState(p *params) *pcState { - var ( - tmState = tmState.State{LastBlockHeight: p.height} - context = newMockProcessorContext(tmState, p.verBL, p.appBL) - ) - state := newPcState(context) - - for _, item := range p.items { - state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height) - } - - state.blocksSynced = p.blocksSynced - state.draining = p.draining - return state -} - -func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived { - return scBlockReceived{ - peerID: peerID, - block: makePcBlock(height), - } -} - -type pcFsmMakeStateValues struct { - currentState *params - event Event - wantState *params - wantNextEvent Event - wantErr error - wantPanic bool -} - -type testFields struct { - name string - steps []pcFsmMakeStateValues -} - -func executeProcessorTests(t *testing.T, tests []testFields) { - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var state *pcState - for _, step := range tt.steps { - defer func() { - r := recover() - if (r != nil) != step.wantPanic { - t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic) - } - }() - - // First step must always initialize the currentState as state. - if step.currentState != nil { - state = makeState(step.currentState) - } - if state == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := state.handle(step.event) - t.Log(state) - assert.Equal(t, step.wantErr, err) - assert.Equal(t, makeState(step.wantState), state) - assert.Equal(t, step.wantNextEvent, nextEvent) - // Next step may use the wantedState as their currentState. - state = makeState(step.wantState) - } - }) - } -} - -func TestRProcessPeerError(t *testing.T) { - tests := []testFields{ - { - name: "error for existing peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P2"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, - wantNextEvent: noOp, - }, - }, - }, - { - name: "error for unknown peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P3"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestPcBlockResponse(t *testing.T) { - tests := []testFields{ - { - name: "add one block", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 1), - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp, - }, - }, - }, - - { - name: "add two blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 3), - wantState: ¶ms{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp, - }, - { // use previous wantState as currentState, - event: mBlockResponse("P1", 4), - wantState: ¶ms{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockSuccess(t *testing.T) { - tests := []testFields{ - { - name: "noop - no blocks over current height", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: rProcessBlock{}, - wantState: ¶ms{}, wantNextEvent: noOp, - }, - }, - }, - { - name: "noop - high new blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp, - }, - }, - }, - { - name: "blocks H+1 and H+2 
present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present after draining", - steps: []pcFsmMakeStateValues{ - { // some contiguous blocks - on stop check draining is set - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, - event: scFinishedEv{}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true}, - wantNextEvent: noOp, - }, - { - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - { // finish when H+1 or/and H+2 are missing - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1}, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockFailures(t *testing.T) { - tests := []testFields{ - { - name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, - verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}}, - event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestScFinishedEv(t *testing.T) { - tests := []testFields{ - { - name: "no blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "maxHeight+1 block present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - 
name: "more blocks present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true}, - wantNextEvent: noOp, - wantErr: nil, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go deleted file mode 100644 index caa5d73f0..000000000 --- a/internal/blocksync/v2/reactor.go +++ /dev/null @@ -1,643 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "time" - - proto "github.com/gogo/protobuf/proto" - - bc "github.com/tendermint/tendermint/internal/blocksync" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -const ( - // chBufferSize is the buffer size of all event channels. - chBufferSize int = 1000 -) - -type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) - Base() int64 - Height() int64 -} - -// BlockchainReactor handles block sync protocol. -type BlockchainReactor struct { - p2p.BaseReactor - - blockSync *sync.AtomicBool // enable block sync on start when it's been Set - stateSynced bool // set to true when SwitchToBlockSync is called by state sync - scheduler *Routine - processor *Routine - logger log.Logger - - mtx tmsync.RWMutex - maxPeerHeight int64 - syncHeight int64 - events chan Event // non-nil during a block sync - - reporter behavior.Reporter - io iIO - store blockStore - - syncStartTime time.Time - syncStartHeight int64 - lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks -} - -type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) -} - -// XXX: unify naming in this package around tmState -func newReactor(state state.State, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor { - initHeight := state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = state.InitialHeight - } - scheduler := newScheduler(initHeight, time.Now()) - pContext := newProcessorContext(store, blockApplier, state, metrics) - // TODO: Fix naming to just newProcesssor - // newPcState requires a processorContext - processor := newPcState(pContext) - - return &BlockchainReactor{ - scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), - processor: newRoutine("processor", processor.handle, chBufferSize), - store: store, - reporter: reporter, - logger: log.NewNopLogger(), - blockSync: sync.NewBool(blockSync), - syncStartHeight: initHeight, - syncStartTime: time.Time{}, - lastSyncRate: 0, - } -} - -// NewBlockchainReactor creates a new reactor instance. 
-func NewBlockchainReactor( - state state.State, - blockApplier blockApplier, - store blockStore, - blockSync bool, - metrics *cons.Metrics) *BlockchainReactor { - reporter := behavior.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, blockSync, metrics) -} - -// SetSwitch implements Reactor interface. -func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - r.Switch = sw - if sw != nil { - r.io = newSwitchIo(sw) - } else { - r.io = nil - } -} - -func (r *BlockchainReactor) setMaxPeerHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - if height > r.maxPeerHeight { - r.maxPeerHeight = height - } -} - -func (r *BlockchainReactor) setSyncHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.syncHeight = height -} - -// SyncHeight returns the height to which the BlockchainReactor has synced. -func (r *BlockchainReactor) SyncHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.syncHeight -} - -// SetLogger sets the logger of the reactor. -func (r *BlockchainReactor) SetLogger(logger log.Logger) { - r.logger = logger - r.scheduler.setLogger(logger) - r.processor.setLogger(logger) -} - -// Start implements cmn.Service interface -func (r *BlockchainReactor) Start() error { - r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch) - if r.blockSync.IsSet() { - err := r.startSync(nil) - if err != nil { - return fmt.Errorf("failed to start block sync: %w", err) - } - } - return nil -} - -// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil, -// the scheduler and processor is updated with this state on startup. -func (r *BlockchainReactor) startSync(state *state.State) error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - return errors.New("block sync already in progress") - } - r.events = make(chan Event, chBufferSize) - go r.scheduler.start() - go r.processor.start() - if state != nil { - <-r.scheduler.ready() - <-r.processor.ready() - r.scheduler.send(bcResetState{state: *state}) - r.processor.send(bcResetState{state: *state}) - } - go r.demux(r.events) - return nil -} - -// endSync ends a block sync -func (r *BlockchainReactor) endSync() { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - close(r.events) - } - r.events = nil - r.scheduler.stop() - r.processor.stop() -} - -// SwitchToBlockSync is called by the state sync reactor when switching to block sync. 
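
Editor's note: startSync and endSync above use a nil/non-nil channel, guarded by the mutex, as the "sync in progress" flag, so a second start fails and Receive can cheaply test whether to forward events. The guard in isolation, names hypothetical:

package example

import (
	"errors"
	"sync"
)

// syncer mirrors the removed guard: a nil events channel means "not
// syncing", so the field doubles as the state flag.
type syncer struct {
	mtx    sync.Mutex
	events chan struct{}
}

func (s *syncer) start() error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.events != nil {
		return errors.New("block sync already in progress")
	}
	s.events = make(chan struct{}, 1000)
	return nil
}

func (s *syncer) stop() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.events != nil {
		close(s.events)
		s.events = nil
	}
}
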
-func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error { - r.stateSynced = true - state = state.Copy() - - err := r.startSync(&state) - if err == nil { - r.syncStartTime = time.Now() - } - - return err -} - -// reactor generated ticker events: -// ticker for cleaning peers -type rTryPrunePeer struct { - priorityHigh - time time.Time -} - -func (e rTryPrunePeer) String() string { - return fmt.Sprintf("rTryPrunePeer{%v}", e.time) -} - -// ticker event for scheduling block requests -type rTrySchedule struct { - priorityHigh - time time.Time -} - -func (e rTrySchedule) String() string { - return fmt.Sprintf("rTrySchedule{%v}", e.time) -} - -// ticker for block processing -type rProcessBlock struct { - priorityNormal -} - -func (e rProcessBlock) String() string { - return "rProcessBlock" -} - -// reactor generated events based on blockchain related messages from peers: -// blockResponse message received from a peer -type bcBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - size int64 - block *types.Block -} - -func (resp bcBlockResponse) String() string { - return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", - resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) -} - -// blockNoResponse message received from a peer -type bcNoBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - height int64 -} - -func (resp bcNoBlockResponse) String() string { - return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", - resp.peerID, resp.height, resp.time) -} - -// statusResponse message received from a peer -type bcStatusResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - base int64 - height int64 -} - -func (resp bcStatusResponse) String() string { - return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", - resp.peerID, resp.height, resp.base, resp.time) -} - -// new peer is connected -type bcAddNewPeer struct { - priorityNormal - peerID types.NodeID -} - -func (resp bcAddNewPeer) String() string { - return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) -} - -// existing peer is removed -type bcRemovePeer struct { - priorityHigh - peerID types.NodeID - reason interface{} -} - -func (resp bcRemovePeer) String() string { - return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) -} - -// resets the scheduler and processor state, e.g. following a switch from state syncing -type bcResetState struct { - priorityHigh - state state.State -} - -func (e bcResetState) String() string { - return fmt.Sprintf("bcResetState{%v}", e.state) -} - -// Takes the channel as a parameter to avoid race conditions on r.events. 
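
Editor's note: the demux loop deleted just below paces its periodic work with a pattern worth keeping in mind: each ticker does a non-blocking send into a 1-buffered channel, so at most one task is ever pending and a slow iteration never causes ticks to pile up. A standalone sketch of one such pacer:

package example

import (
	"context"
	"time"
)

func pacedLoop(ctx context.Context, freq time.Duration, task func()) {
	do := make(chan struct{}, 1)
	tk := time.NewTicker(freq)
	defer tk.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-tk.C:
			select {
			case do <- struct{}{}: // schedule a run if none is pending
			default: // one already queued; skip this tick
			}
		case <-do:
			task()
		}
	}
}
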
-func (r *BlockchainReactor) demux(events <-chan Event) { - var lastHundred = time.Now() - - var ( - processBlockFreq = 20 * time.Millisecond - doProcessBlockCh = make(chan struct{}, 1) - doProcessBlockTk = time.NewTicker(processBlockFreq) - ) - defer doProcessBlockTk.Stop() - - var ( - prunePeerFreq = 1 * time.Second - doPrunePeerCh = make(chan struct{}, 1) - doPrunePeerTk = time.NewTicker(prunePeerFreq) - ) - defer doPrunePeerTk.Stop() - - var ( - scheduleFreq = 20 * time.Millisecond - doScheduleCh = make(chan struct{}, 1) - doScheduleTk = time.NewTicker(scheduleFreq) - ) - defer doScheduleTk.Stop() - - var ( - statusFreq = 10 * time.Second - doStatusCh = make(chan struct{}, 1) - doStatusTk = time.NewTicker(statusFreq) - ) - defer doStatusTk.Stop() - doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers - - // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. - var scSchedulerFailErr error - - // XXX: Extract timers to make testing atemporal - for { - select { - // Pacers: send at most per frequency but don't saturate - case <-doProcessBlockTk.C: - select { - case doProcessBlockCh <- struct{}{}: - default: - } - case <-doPrunePeerTk.C: - select { - case doPrunePeerCh <- struct{}{}: - default: - } - case <-doScheduleTk.C: - select { - case doScheduleCh <- struct{}{}: - default: - } - case <-doStatusTk.C: - select { - case doStatusCh <- struct{}{}: - default: - } - - // Tickers: perform tasks periodically - case <-doScheduleCh: - r.scheduler.send(rTrySchedule{time: time.Now()}) - case <-doPrunePeerCh: - r.scheduler.send(rTryPrunePeer{time: time.Now()}) - case <-doProcessBlockCh: - r.processor.send(rProcessBlock{}) - case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } - - // Events from peers. Closing the channel signals event loop termination. - case event, ok := <-events: - if !ok { - r.logger.Info("Stopping event processing") - return - } - switch event := event.(type) { - case bcStatusResponse: - r.setMaxPeerHeight(event.height) - r.scheduler.send(event) - case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: - r.scheduler.send(event) - default: - r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from scheduler - case event := <-r.scheduler.next(): - switch event := event.(type) { - case scBlockReceived: - r.processor.send(event) - case scPeerError: - r.processor.send(event) - if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil { - r.logger.Error("Error reporting peer", "err", err) - } - case scBlockRequest: - peer := r.Switch.Peers().Get(event.peerID) - if peer == nil { - r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) - continue - } - if err := r.io.sendBlockRequest(peer, event.height); err != nil { - r.logger.Error("Error sending block request", "err", err) - } - case scFinishedEv: - r.processor.send(event) - r.scheduler.stop() - case scSchedulerFail: - if scSchedulerFailErr != event.reason { - r.logger.Error("Scheduler failure", "err", event.reason.Error()) - scSchedulerFailErr = event.reason - } - case scPeersPruned: - // Remove peers from the processor. 
- for _, peerID := range event.peers { - r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) - } - r.logger.Debug("Pruned peers", "count", len(event.peers)) - case noOpEvent: - default: - r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from processor - case event := <-r.processor.next(): - switch event := event.(type) { - case pcBlockProcessed: - r.setSyncHeight(event.height) - if (r.syncHeight-r.syncStartHeight)%100 == 0 { - newSyncRate := 100 / time.Since(lastHundred).Seconds() - if r.lastSyncRate == 0 { - r.lastSyncRate = newSyncRate - } else { - r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate - } - r.logger.Info("block sync Rate", "height", r.syncHeight, - "max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate) - lastHundred = time.Now() - } - r.scheduler.send(event) - case pcBlockVerificationFailure: - r.scheduler.send(event) - case pcFinished: - r.logger.Info("block sync complete, switching to consensus") - if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { - r.logger.Error("Failed to switch to consensus reactor") - } - r.endSync() - r.blockSync.UnSet() - return - case noOpEvent: - default: - r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) - } - - // Terminal event from scheduler - case err := <-r.scheduler.final(): - switch err { - case nil: - r.logger.Info("Scheduler stopped") - default: - r.logger.Error("Scheduler aborted with error", "err", err) - } - - // Terminal event from processor - case err := <-r.processor.final(): - switch err { - case nil: - r.logger.Info("Processor stopped") - default: - r.logger.Error("Processor aborted with error", "err", err) - } - } - } -} - -// Stop implements cmn.Service interface. -func (r *BlockchainReactor) Stop() error { - r.logger.Info("reactor stopping") - r.endSync() - r.logger.Info("reactor stopped") - return nil -} - -// Receive implements Reactor by handling different message types. -// XXX: do not call any methods that can block or incur heavy processing. 
-// https://github.com/tendermint/tendermint/issues/2888 -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - logger := r.logger.With("src", src.ID(), "chID", chID) - - msgProto := new(bcproto.Message) - - if err := proto.Unmarshal(msgBytes, msgProto); err != nil { - logger.Error("error decoding message", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - if err := msgProto.Validate(); err != nil { - logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - r.logger.Debug("received", "msg", msgProto) - - switch msg := msgProto.Sum.(type) { - case *bcproto.Message_StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { - logger.Error("Could not send status message to src peer") - } - - case *bcproto.Message_BlockRequest: - block := r.store.LoadBlock(msg.BlockRequest.Height) - if block != nil { - if err := r.io.sendBlockToPeer(block, src); err != nil { - logger.Error("Could not send block message to src peer", "err", err) - } - } else { - logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height) - if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil { - logger.Error("Couldn't send block not found msg", "err", err) - } - } - - case *bcproto.Message_StatusResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcStatusResponse{ - peerID: src.ID(), - base: msg.StatusResponse.Base, - height: msg.StatusResponse.Height, - } - } - r.mtx.RUnlock() - - case *bcproto.Message_BlockResponse: - bi, err := types.BlockFromProto(msg.BlockResponse.Block) - if err != nil { - logger.Error("error transitioning block from protobuf", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - r.mtx.RLock() - if r.events != nil { - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: bi, - size: int64(len(msgBytes)), - time: time.Now(), - } - } - r.mtx.RUnlock() - - case *bcproto.Message_NoBlockResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcNoBlockResponse{ - peerID: src.ID(), - height: msg.NoBlockResponse.Height, - time: time.Now(), - } - } - r.mtx.RUnlock() - } -} - -// AddPeer implements Reactor interface -func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) - if err != nil { - r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err) - } - - err = r.io.sendStatusRequest(peer) - if err != nil { - r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcAddNewPeer{peerID: peer.ID()} - } -} - -// RemovePeer implements Reactor interface. 
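
Editor's note: the demux loop above smooths sync throughput with an exponential moving average taken over each 100-block window (0.9 of the old rate plus 0.1 of the new), and GetRemainingSyncTime just below divides the blocks still outstanding by that rate. Both calculations isolated in a sketch, names hypothetical:

package example

import "time"

type syncRate struct {
	lastHundred time.Time
	rate        float64 // blocks/s, EMA-smoothed
}

// onHundredBlocks is called once every 100 processed blocks.
func (s *syncRate) onHundredBlocks(now time.Time) {
	newRate := 100 / now.Sub(s.lastHundred).Seconds()
	if s.rate == 0 {
		s.rate = newRate // first window seeds the average
	} else {
		s.rate = 0.9*s.rate + 0.1*newRate
	}
	s.lastHundred = now
}

// remaining estimates the time left: blocks outstanding divided by the
// smoothed rate, guarding against a rate too small to divide by.
func (s *syncRate) remaining(maxPeerHeight, startHeight, syncHeight int64) time.Duration {
	target := maxPeerHeight - startHeight
	current := syncHeight - startHeight + 1
	if current < 0 || s.rate < 0.001 {
		return 0
	}
	return time.Duration(float64(target-current) / s.rate * float64(time.Second))
}
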
-func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcRemovePeer{ - peerID: peer.ID(), - reason: reason, - } - } -} - -// GetChannels implements Reactor -func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 5, - SendQueueCapacity: 2000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, - }, - } -} - -func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.maxPeerHeight -} - -func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration { - if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { - return time.Duration(0) - } - return time.Since(r.syncStartTime) -} - -func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration { - if !r.blockSync.IsSet() { - return time.Duration(0) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - - targetSyncs := r.maxPeerHeight - r.syncStartHeight - currentSyncs := r.syncHeight - r.syncStartHeight + 1 - if currentSyncs < 0 || r.lastSyncRate < 0.001 { - return time.Duration(0) - } - - remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate - - return time.Duration(int64(remain * float64(time.Second))) -} diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go deleted file mode 100644 index fd13da215..000000000 --- a/internal/blocksync/v2/reactor_test.go +++ /dev/null @@ -1,534 +0,0 @@ -package v2 - -import ( - "fmt" - "net" - "os" - "sync" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" - tmstore "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" -) - -type mockPeer struct { - service.Service - id types.NodeID -} - -func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() types.NodeID { return mp.id } -func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } -func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } - -func (mp mockPeer) IsOutbound() bool { return true } -func (mp mockPeer) IsPersistent() bool { return true } -func (mp mockPeer) CloseConn() error { return nil } - -func (mp mockPeer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: "", - ListenAddr: "", - } -} -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } - -func (mp mockPeer) Send(byte, []byte) bool { return true } -func (mp mockPeer) TrySend(byte, []byte) bool { return true } - -func (mp mockPeer) 
Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return struct{}{} } - -//nolint:unused -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -//nolint:unused -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -//nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - return ml.blocks[height] -} - -//nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} - -type mockBlockApplier struct { -} - -// XXX: Add whitelist/blacklist? -func (mba *mockBlockApplier) ApplyBlock( - state sm.State, blockID types.BlockID, block *types.Block, -) (sm.State, error) { - state.LastBlockHeight++ - return state, nil -} - -type mockSwitchIo struct { - mtx sync.Mutex - switchedToConsensus bool - numStatusResponse int - numBlockResponse int - numNoBlockResponse int - numStatusRequest int -} - -var _ iIO = (*mockSwitchIo)(nil) - -func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { - return nil -} - -func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numNoBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.switchedToConsensus = true - return true -} - -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil -} - -func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusRequest++ - return nil -} - -type testReactorParams struct { - logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator - startHeight int64 - mockA bool -} - -func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { - store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight) - reporter := behavior.NewMockReporter() - - var appl blockApplier - - if p.mockA { - appl = &mockBlockApplier{} - } else { - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - require.NoError(t, err) - db := dbm.NewMemDB() - stateStore := sm.NewStore(db) - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - appl = sm.NewBlockExecutor( - stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - } - - r := newReactor(state, store, reporter, appl, true, cons.NopMetrics()) - logger := log.TestingLogger() - r.SetLogger(logger.With("module", "blockchain")) - - return r -} - -// This test is left here and not deleted to retain the termination cases for -// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). 
-// func TestReactorTerminationScenarios(t *testing.T) { - -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) - -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } - -// type testEvent struct { -// evType string -// peer string -// height int64 -// } - -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: 
"BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } - -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behavior.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) - -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: p2p.ID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } - -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// reactor.Stop() -// }) -// } -// } - -func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) - - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) - - params := testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - startHeight: 20, - mockA: true, - } - - type testEvent struct { - peer string - event interface{} - } - - tests := []struct { - name string - params testReactorParams - msgs []testEvent - }{ - { - name: "status request", - params: params, - msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - reactor := newTestReactor(t, params) - mockSwitch := &mockSwitchIo{switchedToConsensus: false} - reactor.io = mockSwitch - err := reactor.Start() - require.NoError(t, err) - - for i := 0; i < len(tt.msgs); i++ { - step := tt.msgs[i] - switch ev := step.event.(type) { - case bcproto.StatusRequest: - old := mockSwitch.numStatusResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: - if ev.Height > params.startHeight { - old := mockSwitch.numNoBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, 
mockSwitch.numNoBlockResponse) - } else { - old := mockSwitch.numBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numBlockResponse) - } - } - } - err = reactor.Stop() - require.NoError(t, err) - }) - } -} - -func TestReactorSetSwitchNil(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - valSet, privVals := factory.ValidatorSet(1, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) - - reactor := newTestReactor(t, testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - }) - reactor.SetSwitch(nil) - - assert.Nil(t, reactor.Switch) - assert.Nil(t, reactor.io) -} - -type testApp struct { - abci.BaseApplication -} - -func newReactorStore( - t *testing.T, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) { - t.Helper() - - require.Len(t, privVals, 1) - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - stateDB := dbm.NewMemDB() - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisState(genDoc) - require.NoError(t, err) - - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - - // add blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote, err := factory.MakeVote( - privVals[0], - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - time.Now(), - ) - require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) - } - - thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) - require.NoError(t, err) - - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } - return blockStore, state, blockExec -} diff --git a/internal/blocksync/v2/routine.go b/internal/blocksync/v2/routine.go deleted file mode 100644 index e4ca52add..000000000 --- a/internal/blocksync/v2/routine.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync/atomic" - - "github.com/Workiva/go-datastructures/queue" - - "github.com/tendermint/tendermint/libs/log" -) - -type handleFunc = func(event Event) (Event, error) - -const historySize = 25 - -// Routine is a structure that models a finite state machine as serialized -// stream of events processed by a handle function. This Routine structure -// handles the concurrency and messaging guarantees. 
Events sent via
-// `send` are handled by the `handle` function; the resulting events are
-// consumed via the `next()` iterator. Calling `stop()` on a routine concludes
-// processing of all sent events and produces the `final()` event representing
-// the terminal state.
-type Routine struct {
-	name    string
-	handle  handleFunc
-	queue   *queue.PriorityQueue
-	history []Event
-	out     chan Event
-	fin     chan error
-	rdy     chan struct{}
-	running *uint32
-	logger  log.Logger
-	metrics *Metrics
-}
-
-func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
-	return &Routine{
-		name:    name,
-		handle:  handleFunc,
-		queue:   queue.NewPriorityQueue(bufferSize, true),
-		history: make([]Event, 0, historySize),
-		out:     make(chan Event, bufferSize),
-		rdy:     make(chan struct{}, 1),
-		fin:     make(chan error, 1),
-		running: new(uint32),
-		logger:  log.NewNopLogger(),
-		metrics: NopMetrics(),
-	}
-}
-
-func (rt *Routine) setLogger(logger log.Logger) {
-	rt.logger = logger
-}
-
-// nolint:unused
-func (rt *Routine) setMetrics(metrics *Metrics) {
-	rt.metrics = metrics
-}
-
-func (rt *Routine) start() {
-	rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
-	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
-	if !running {
-		panic(fmt.Sprintf("%s is already running", rt.name))
-	}
-	close(rt.rdy)
-	defer func() {
-		if r := recover(); r != nil {
-			var (
-				b strings.Builder
-				j int
-			)
-			for i := len(rt.history) - 1; i >= 0; i-- {
-				fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
-				j++
-			}
-			panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
-		}
-		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
-		if !stopped {
-			panic(fmt.Sprintf("%s failed to stop", rt.name))
-		}
-	}()
-
-	for {
-		events, err := rt.queue.Get(1)
-		if err == queue.ErrDisposed {
-			rt.terminate(nil)
-			return
-		} else if err != nil {
-			rt.terminate(err)
-			return
-		}
-		oEvent, err := rt.handle(events[0].(Event))
-		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
-		if err != nil {
-			rt.terminate(err)
-			return
-		}
-		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
-		rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
-
-		// Skip rTrySchedule and rProcessBlock events as they clutter the history
-		// due to their frequency.
-		switch events[0].(type) {
-		case rTrySchedule:
-		case rProcessBlock:
-		default:
-			rt.history = append(rt.history, events[0].(Event))
-			if len(rt.history) > historySize {
-				rt.history = rt.history[1:]
-			}
-		}
-
-		rt.out <- oEvent
-	}
-}
-
-// XXX: look into returning OpError in the net package
-func (rt *Routine) send(event Event) bool {
-	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
-	if !rt.isRunning() {
-		return false
-	}
-	err := rt.queue.Put(event)
-	if err != nil {
-		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
-		rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
-		return false
-	}
-
-	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
-	return true
-}
-
-func (rt *Routine) isRunning() bool {
-	return atomic.LoadUint32(rt.running) == 1
-}
-
-func (rt *Routine) next() chan Event {
-	return rt.out
-}
-
-func (rt *Routine) ready() chan struct{} {
-	return rt.rdy
-}
-
-func (rt *Routine) stop() {
-	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
-		return
-	}
-
-	rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
-	rt.queue.Dispose() // this should block until all queue items are freed?
-}
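The `Routine` type above is an event loop: one goroutine drains a priority queue, feeds each event to `handle`, and publishes results on `out` until a terminal error is reported on `fin`. A stripped-down sketch of the same pattern, with a buffered channel standing in for the Workiva priority queue and hypothetical names throughout:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type event interface{}

// routine is a simplified version of the pattern above: one goroutine
// drains a queue, feeds every event to a handle function, and reports the
// terminal error on fin. A buffered channel stands in for the priority queue.
type routine struct {
	queue   chan event
	out     chan event
	fin     chan error
	running uint32
	handle  func(event) (event, error)
}

func (rt *routine) start() {
	if !atomic.CompareAndSwapUint32(&rt.running, 0, 1) {
		panic("already running")
	}
	for ev := range rt.queue {
		next, err := rt.handle(ev)
		if err != nil {
			atomic.StoreUint32(&rt.running, 0)
			rt.fin <- err // terminal state, like terminate() above
			return
		}
		rt.out <- next
	}
}

func main() {
	rt := &routine{
		queue: make(chan event, 8),
		out:   make(chan event, 8),
		fin:   make(chan error, 1),
		handle: func(ev event) (event, error) {
			if ev == "stop" {
				return nil, fmt.Errorf("done")
			}
			return ev, nil
		},
	}
	go rt.start()
	rt.queue <- "tick"
	rt.queue <- "stop"
	fmt.Println(<-rt.out, <-rt.fin) // tick done
}
```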
-
-func (rt *Routine) final() chan error {
-	return rt.fin
-}
-
-// XXX: Maybe get rid of this
-func (rt *Routine) terminate(reason error) {
-	// We don't close the rt.out channel here, to avoid spinning on the closed channel
-	// in the event loop.
-	rt.fin <- reason
-}
diff --git a/internal/blocksync/v2/routine_test.go b/internal/blocksync/v2/routine_test.go
deleted file mode 100644
index 8f92bee3e..000000000
--- a/internal/blocksync/v2/routine_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package v2
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-type eventA struct {
-	priorityNormal
-}
-
-var errDone = fmt.Errorf("done")
-
-func simpleHandler(event Event) (Event, error) {
-	if _, ok := event.(eventA); ok {
-		return noOp, errDone
-	}
-	return noOp, nil
-}
-
-func TestRoutineFinal(t *testing.T) {
-	var (
-		bufferSize = 10
-		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
-	)
-
-	assert.False(t, routine.isRunning(),
-		"expected an initialized routine to not be running")
-	go routine.start()
-	<-routine.ready()
-	assert.True(t, routine.isRunning(),
-		"expected a started routine to be running")
-
-	assert.True(t, routine.send(eventA{}),
-		"expected sending to a ready routine to succeed")
-
-	assert.Equal(t, errDone, <-routine.final(),
-		"expected the final event to be done")
-
-	assert.False(t, routine.isRunning(),
-		"expected a completed routine to no longer be running")
-}
-
-func TestRoutineStop(t *testing.T) {
-	var (
-		bufferSize = 10
-		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
-	)
-
-	assert.False(t, routine.send(eventA{}),
-		"expected sending to an unstarted routine to fail")
-
-	go routine.start()
-	<-routine.ready()
-
-	assert.True(t, routine.send(eventA{}),
-		"expected sending to a running routine to succeed")
-
-	routine.stop()
-
-	assert.False(t, routine.send(eventA{}),
-		"expected sending to a stopped routine to fail")
-}
-
-type finalCount struct {
-	count int
-}
-
-func (f finalCount) Error() string {
-	return "end"
-}
-
-func genStatefulHandler(maxCount int) handleFunc {
-	counter := 0
-	return func(event Event) (Event, error) {
-		if _, ok := event.(eventA); ok {
-			counter++
-			if counter >= maxCount {
-				return noOp, finalCount{counter}
-			}
-
-			return eventA{}, nil
-		}
-		return noOp, nil
-	}
-}
-
-func feedback(r *Routine) {
-	for event := range r.next() {
-		r.send(event)
-	}
-}
-
-func TestStatefulRoutine(t *testing.T) {
-	var (
-		count      = 10
-		handler    = genStatefulHandler(count)
-		bufferSize = 20
-		routine    = newRoutine("statefulRoutine", handler, bufferSize)
-	)
-
-	go routine.start()
-	go feedback(routine)
-	<-routine.ready()
-
-	assert.True(t, routine.send(eventA{}),
-		"expected sending to a started routine to succeed")
-
-	final := <-routine.final()
-	if fnl, ok := final.(finalCount); ok {
-		assert.Equal(t, count, fnl.count,
-			"expected the routine to count to 10")
-	} else {
-		t.Fail()
-	}
-}
-
-type lowPriorityEvent struct {
-	priorityLow
-}
-
-type highPriorityEvent struct {
-	priorityHigh
-}
-
-func handleWithPriority(event Event) (Event, error) {
-	switch event.(type) {
-	case lowPriorityEvent:
-		return noOp, nil
-	case highPriorityEvent:
-		return noOp, errDone
-	}
-	return noOp, nil
-}
-
-func TestPriority(t *testing.T) {
-	var (
-		bufferSize = 20
-		routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
-	)
-
-	go routine.start()
-	<-routine.ready()
-	go func() {
-		for {
-			routine.send(lowPriorityEvent{})
-			time.Sleep(1 * time.Millisecond)
-		}
-	}()
-	time.Sleep(10 * time.Millisecond)
-
-	assert.True(t, routine.isRunning(),
-		"expected a started routine to be running")
-	assert.True(t, routine.send(highPriorityEvent{}),
-		"expected send to succeed even when saturated")
-
-	assert.Equal(t, errDone, <-routine.final())
-	assert.False(t, routine.isRunning(),
-		"expected the routine to stop after the final event")
-}
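TestPriority above depends on high-priority events overtaking a saturated queue. The sketch below shows how an event type can implement the go-datastructures `queue.Item` interface that `newRoutine`'s priority queue consumes; the ordering direction of `Compare` is an assumption here (the queue is taken to return first whatever `Compare` orders first), and the names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/Workiva/go-datastructures/queue"
)

// prioEvent implements queue.Item. Compare is written so that events with a
// higher priority field order first, assuming the queue pops the item that
// Compare orders first.
type prioEvent struct {
	name     string
	priority int
}

func (e prioEvent) Compare(other queue.Item) int {
	o := other.(prioEvent)
	switch {
	case e.priority > o.priority:
		return -1 // higher priority dequeues first
	case e.priority < o.priority:
		return 1
	default:
		return 0
	}
}

func main() {
	q := queue.NewPriorityQueue(10, true)
	_ = q.Put(prioEvent{"low-1", 1}, prioEvent{"low-2", 1}, prioEvent{"high", 9})
	for i := 0; i < 3; i++ {
		items, _ := q.Get(1)
		fmt.Println(items[0].(prioEvent).name) // high, then low-1, low-2
	}
	q.Dispose()
}
```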
diff --git a/internal/blocksync/v2/scheduler.go b/internal/blocksync/v2/scheduler.go
deleted file mode 100644
index b731d96a4..000000000
--- a/internal/blocksync/v2/scheduler.go
+++ /dev/null
@@ -1,711 +0,0 @@
-package v2
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"sort"
-	"time"
-
-	"github.com/tendermint/tendermint/types"
-)
-
-// Events generated by the scheduler:
-// all blocks have been processed
-type scFinishedEv struct {
-	priorityNormal
-	reason string
-}
-
-func (e scFinishedEv) String() string {
-	return fmt.Sprintf("scFinishedEv{%v}", e.reason)
-}
-
-// send a blockRequest message
-type scBlockRequest struct {
-	priorityNormal
-	peerID types.NodeID
-	height int64
-}
-
-func (e scBlockRequest) String() string {
-	return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID)
-}
-
-// a block has been received and validated by the scheduler
-type scBlockReceived struct {
-	priorityNormal
-	peerID types.NodeID
-	block  *types.Block
-}
-
-func (e scBlockReceived) String() string {
-	return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID)
-}
-
-// scheduler detected a peer error
-type scPeerError struct {
-	priorityHigh
-	peerID types.NodeID
-	reason error
-}
-
-func (e scPeerError) String() string {
-	return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason)
-}
-
-// scheduler removed a set of peers (timed out or too slow)
-type scPeersPruned struct {
-	priorityHigh
-	peers []types.NodeID
-}
-
-func (e scPeersPruned) String() string {
-	return fmt.Sprintf("scPeersPruned{%v}", e.peers)
-}
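These event types are the scheduler's outputs; the scheduler itself (see `handle` at the end of this file) is a state machine mapping one input event to at most one output event. A hypothetical, self-contained sketch of that step shape:

```go
package main

import (
	"errors"
	"fmt"
)

// schedEvent and the concrete types below are illustrative stand-ins for
// the scheduler's input events, not the deleted API.
type schedEvent interface{}

type evStatusResponse struct {
	peer   string
	height int64
}

type evTimeout struct{}

// step maps one input event to at most one output event, the same shape as
// the scheduler's handle() function.
func step(ev schedEvent) (schedEvent, error) {
	switch e := ev.(type) {
	case evStatusResponse:
		// like handleStatusResponse: record the peer's range, emit a request.
		return fmt.Sprintf("scBlockRequest{next height from %s, <=%d}", e.peer, e.height), nil
	case evTimeout:
		// like handleTrySchedule's no-advance branch: finish the sync.
		return "scFinishedEv{timeout, no advance}", nil
	default:
		return nil, errors.New("unknown event")
	}
}

func main() {
	out, _ := step(evStatusResponse{peer: "P1", height: 42})
	fmt.Println(out)
	out, _ = step(evTimeout{})
	fmt.Println(out)
}
```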
-
-// XXX: make this fatal?
-// scheduler encountered a fatal error
-type scSchedulerFail struct {
-	priorityHigh
-	reason error
-}
-
-func (e scSchedulerFail) String() string {
-	return fmt.Sprintf("scSchedulerFail{%v}", e.reason)
-}
-
-type blockState int
-
-const (
-	blockStateUnknown   blockState = iota + 1 // no known peer has this block
-	blockStateNew                             // indicates that a peer has reported having this block
-	blockStatePending                         // indicates that this block has been requested from a peer
-	blockStateReceived                        // indicates that this block has been received from a peer
-	blockStateProcessed                       // indicates that this block has been applied
-)
-
-func (e blockState) String() string {
-	switch e {
-	case blockStateUnknown:
-		return "Unknown"
-	case blockStateNew:
-		return "New"
-	case blockStatePending:
-		return "Pending"
-	case blockStateReceived:
-		return "Received"
-	case blockStateProcessed:
-		return "Processed"
-	default:
-		return fmt.Sprintf("invalid blockState: %d", e)
-	}
-}
-
-type peerState int
-
-const (
-	peerStateNew = iota + 1
-	peerStateReady
-	peerStateRemoved
-)
-
-func (e peerState) String() string {
-	switch e {
-	case peerStateNew:
-		return "New"
-	case peerStateReady:
-		return "Ready"
-	case peerStateRemoved:
-		return "Removed"
-	default:
-		panic(fmt.Sprintf("unknown peerState: %d", e))
-	}
-}
-
-type scPeer struct {
-	peerID types.NodeID
-
-	// initialized as New when peer is added, updated to Ready when statusUpdate is received,
-	// updated to Removed when peer is removed
-	state peerState
-
-	base        int64 // updated when statusResponse is received
-	height      int64 // updated when statusResponse is received
-	lastTouched time.Time
-	lastRate    int64 // last receive rate in bytes
-}
-
-func (p scPeer) String() string {
-	return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}",
-		p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
-}
-
-func newScPeer(peerID types.NodeID) *scPeer {
-	return &scPeer{
-		peerID:      peerID,
-		state:       peerStateNew,
-		base:        -1,
-		height:      -1,
-		lastTouched: time.Time{},
-	}
-}
-
-// The scheduler keeps track of the state of each block and each peer. The
-// scheduler will attempt to schedule new block requests with `trySchedule`
-// events and remove slow peers with `tryPrune` events.
-type scheduler struct {
-	initHeight int64
-
-	// next block that needs to be processed. All blocks with smaller height are
-	// in Processed state.
-	height int64
-
-	// lastAdvance tracks the last time a block execution happened.
-	// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
-	// This covers the cases where there are no peers or all peers have a lower height.
-	lastAdvance time.Time
-	syncTimeout time.Duration
-
-	// a map of peerID to scheduler specific peer struct `scPeer` used to keep
-	// track of peer specific state
-	peers       map[types.NodeID]*scPeer
-	peerTimeout time.Duration // maximum response time from a peer, otherwise prune
-	minRecvRate int64         // minimum receive rate from a peer, otherwise prune
-
-	// the maximum number of blocks that should be New, Received or Pending at any point
-	// in time. This is used to enforce a limit on the blockStates map.
-	targetPending int
-	// a list of blocks to be scheduled (New), Pending or Received. Its length should be
-	// smaller than targetPending.
-	blockStates map[int64]blockState
-
-	// a map of heights to the peer we are waiting for a response from
-	pendingBlocks map[int64]types.NodeID
-
-	// the time at which a block was put in blockStatePending
-	pendingTime map[int64]time.Time
-
-	// a map of heights to the peers that put the block in blockStateReceived
-	receivedBlocks map[int64]types.NodeID
-}
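A block moves through the `blockState` lifecycle roughly as Unknown -> New -> Pending -> Received -> Processed, falling back to New if the responsible peer is removed. A toy walkthrough of that bookkeeping, simplified to a bare map with hypothetical names:

```go
package main

import "fmt"

type blockState int

const (
	stateUnknown blockState = iota + 1
	stateNew
	statePending
	stateReceived
	stateProcessed
)

func main() {
	states := map[int64]blockState{}

	// A peer reports having up to height 3: heights 1-3 become schedulable.
	for h := int64(1); h <= 3; h++ {
		states[h] = stateNew
	}

	states[1] = statePending  // markPending: height 1 requested from a peer
	states[1] = stateReceived // markReceived: the block arrived

	// Had the responsible peer been removed instead, height 1 would fall
	// back to stateNew so another peer could be asked (see removePeer).

	delete(states, 1)   // markProcessed: block applied, bookkeeping dropped
	fmt.Println(states) // heights 2 and 3 remain New
}
```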
-
-func (sc scheduler) String() string {
-	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
-		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
-}
-
-func newScheduler(initHeight int64, startTime time.Time) *scheduler {
-	sc := scheduler{
-		initHeight:     initHeight,
-		lastAdvance:    startTime,
-		syncTimeout:    60 * time.Second,
-		height:         initHeight,
-		blockStates:    make(map[int64]blockState),
-		peers:          make(map[types.NodeID]*scPeer),
-		pendingBlocks:  make(map[int64]types.NodeID),
-		pendingTime:    make(map[int64]time.Time),
-		receivedBlocks: make(map[int64]types.NodeID),
-		targetPending:  10,               // TODO - pass as param
-		peerTimeout:    15 * time.Second, // TODO - pass as param
-		minRecvRate:    0,                // int64(7680), TODO - pass as param
-	}
-
-	return &sc
-}
-
-func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
-	if _, ok := sc.peers[peerID]; !ok {
-		sc.peers[peerID] = newScPeer(peerID)
-	}
-	return sc.peers[peerID]
-}
-
-func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
-	peer, ok := sc.peers[peerID]
-	if !ok {
-		return fmt.Errorf("couldn't find peer %s", peerID)
-	}
-
-	if peer.state != peerStateReady {
-		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
-	}
-
-	peer.lastTouched = time
-
-	return nil
-}
-
-func (sc *scheduler) removePeer(peerID types.NodeID) {
-	peer, ok := sc.peers[peerID]
-	if !ok {
-		return
-	}
-	if peer.state == peerStateRemoved {
-		return
-	}
-
-	for height, pendingPeerID := range sc.pendingBlocks {
-		if pendingPeerID == peerID {
-			sc.setStateAtHeight(height, blockStateNew)
-			delete(sc.pendingTime, height)
-			delete(sc.pendingBlocks, height)
-		}
-	}
-
-	for height, rcvPeerID := range sc.receivedBlocks {
-		if rcvPeerID == peerID {
-			sc.setStateAtHeight(height, blockStateNew)
-			delete(sc.receivedBlocks, height)
-		}
-	}
-
-	// remove the blocks from blockStates if the peer removal causes the max peer height to be lower.
-	peer.state = peerStateRemoved
-	maxPeerHeight := int64(0)
-	for _, otherPeer := range sc.peers {
-		if otherPeer.state != peerStateReady {
-			continue
-		}
-		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
-			maxPeerHeight = otherPeer.height
-		}
-	}
-	for h := range sc.blockStates {
-		if h > maxPeerHeight {
-			delete(sc.blockStates, h)
-		}
-	}
-}
-
-// check if the blockPool is running low and add new blocks in New state to be requested.
-// This function is called when there is an increase in the maximum peer height or when
-// blocks are processed.
-func (sc *scheduler) addNewBlocks() {
-	if len(sc.blockStates) >= sc.targetPending {
-		return
-	}
-
-	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
-		if i > sc.maxHeight() {
-			break
-		}
-		if sc.getStateAtHeight(i) == blockStateUnknown {
-			sc.setStateAtHeight(i, blockStateNew)
-		}
-	}
-}
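`addNewBlocks` maintains a sliding window: at most `targetPending` heights may be tracked at once, and no height beyond the best peer height is added. A small, hypothetical pure-function version of that windowing:

```go
package main

import "fmt"

// nextToSchedule mirrors the windowing in addNewBlocks: starting at the
// current height, mark at most targetPending heights (capped by the best
// peer height) as schedulable. Hypothetical helper, not the deleted API.
func nextToSchedule(height, maxPeer int64, targetPending, inFlight int) []int64 {
	var out []int64
	for h := height; h < height+int64(targetPending); h++ {
		if h > maxPeer || len(out)+inFlight >= targetPending {
			break
		}
		out = append(out, h)
	}
	return out
}

func main() {
	// At height 100 with a best peer at 104 and a budget of 10, only the
	// five existing heights 100..104 can enter the window.
	fmt.Println(nextToSchedule(100, 104, 10, 0)) // [100 101 102 103 104]
}
```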
from %d to %d", peer.height, height) - } - - if base > height { - sc.removePeer(peerID) - return fmt.Errorf("cannot set peer base higher than its height") - } - - peer.base = base - peer.height = height - peer.state = peerStateReady - - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) getStateAtHeight(height int64) blockState { - if height < sc.height { - return blockStateProcessed - } else if state, ok := sc.blockStates[height]; ok { - return state - } else { - return blockStateUnknown - } -} - -func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID { - peers := make([]types.NodeID, 0) - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if peer.base <= height && peer.height >= height { - peers = append(peers, peer.peerID) - } - } - return peers -} - -func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID { - prunable := make([]types.NodeID, 0) - for peerID, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate { - prunable = append(prunable, peerID) - } - } - // Tests for handleTryPrunePeer() may fail without sort due to range non-determinism - sort.Sort(PeerByID(prunable)) - return prunable -} - -func (sc *scheduler) setStateAtHeight(height int64, state blockState) { - sc.blockStates[height] = state -} - -// CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error { - peer := sc.peers[peerID] - - if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { - return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) - } - - pendingTime, ok := sc.pendingTime[height] - if !ok || now.Sub(pendingTime) <= 0 { - return fmt.Errorf("clock error: block %d received at %s but requested at %s", - height, pendingTime, now) - } - - peer.lastRate = size / now.Sub(pendingTime).Nanoseconds() - - sc.setStateAtHeight(height, blockStateReceived) - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - - sc.receivedBlocks[height] = peerID - - return nil -} - -func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error { - state := sc.getStateAtHeight(height) - if state != blockStateNew { - return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state) - } - - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("cannot find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state) - } - - if height > peer.height { - return fmt.Errorf("cannot request height %d from peer %s that is at height %d", - height, peerID, peer.height) - } - - if height < peer.base { - return fmt.Errorf("cannot request height %d for peer %s with base %d", - height, peerID, peer.base) - } - - sc.setStateAtHeight(height, blockStatePending) - sc.pendingBlocks[height] = peerID - sc.pendingTime[height] = time - - return nil -} - -func (sc *scheduler) markProcessed(height int64) error { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, so - // when pcBlockProcessed event is received, the block had been requested - // again => don't check the block state. 
-	sc.lastAdvance = time.Now()
-	sc.height = height + 1
-	delete(sc.pendingBlocks, height)
-	delete(sc.pendingTime, height)
-	delete(sc.receivedBlocks, height)
-	delete(sc.blockStates, height)
-	sc.addNewBlocks()
-	return nil
-}
-
-func (sc *scheduler) allBlocksProcessed() bool {
-	if len(sc.peers) == 0 {
-		return false
-	}
-	return sc.height >= sc.maxHeight()
-}
-
-// returns max peer height or the last processed block, i.e. sc.height-1
-func (sc *scheduler) maxHeight() int64 {
-	max := sc.height - 1
-	for _, peer := range sc.peers {
-		if peer.state != peerStateReady {
-			continue
-		}
-		if max < peer.height {
-			max = peer.height
-		}
-	}
-	return max
-}
-
-// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks
-func (sc *scheduler) nextHeightToSchedule() int64 {
-	var min int64 = math.MaxInt64
-	for height, state := range sc.blockStates {
-		if state == blockStateNew && height < min {
-			min = height
-		}
-	}
-	if min == math.MaxInt64 {
-		min = -1
-	}
-	return min
-}
-
-func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
-	var heights []int64
-	for height, pendingPeerID := range sc.pendingBlocks {
-		if pendingPeerID == peerID {
-			heights = append(heights, height)
-		}
-	}
-	return heights
-}
-
-func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
-	peers := sc.getPeersWithHeight(height)
-	if len(peers) == 0 {
-		return "", fmt.Errorf("cannot find peer for height %d", height)
-	}
-
-	// create a map from number of pending requests to a list
-	// of peers having that number of pending requests.
-	pendingFrom := make(map[int][]types.NodeID)
-	for _, peerID := range peers {
-		numPending := len(sc.pendingFrom(peerID))
-		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
-	}
-
-	// find the set of peers with the minimum number of pending requests.
-	var minPending int64 = math.MaxInt64
-	for mp := range pendingFrom {
-		if int64(mp) < minPending {
-			minPending = int64(mp)
-		}
-	}
-
-	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
-	return pendingFrom[int(minPending)][0], nil
-}
-
-// PeerByID is a list of peers sorted by peerID.
-type PeerByID []types.NodeID
-
-func (peers PeerByID) Len() int {
-	return len(peers)
-}
-func (peers PeerByID) Less(i, j int) bool {
-	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
-}
-
-func (peers PeerByID) Swap(i, j int) {
-	peers[i], peers[j] = peers[j], peers[i]
-}
-
-// Handlers
-
-// This handler gets the block, performs some validation and then passes it on to the processor.
-func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
-	err := sc.touchPeer(event.peerID, event.time)
-	if err != nil {
-		// peer does not exist OR is not ready
-		return noOp, nil
-	}
-
-	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
-	if err != nil {
-		sc.removePeer(event.peerID)
-		return scPeerError{peerID: event.peerID, reason: err}, nil
-	}
-
-	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
-}
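`selectPeer` above load-balances by picking a peer with the fewest in-flight requests, breaking ties by sorted peer ID so scheduling stays deterministic. A standalone sketch of that selection rule (hypothetical helper, not the deleted API):

```go
package main

import (
	"fmt"
	"sort"
)

// leastLoaded mirrors selectPeer: among candidate peers, keep those with
// the fewest in-flight requests, then break ties lexicographically.
func leastLoaded(pending map[string]int, candidates []string) string {
	best := make([]string, 0)
	min := int(^uint(0) >> 1) // max int
	for _, p := range candidates {
		switch n := pending[p]; {
		case n < min:
			min, best = n, []string{p}
		case n == min:
			best = append(best, p)
		}
	}
	sort.Strings(best) // deterministic tie-break, like sort.Sort(PeerByID(...))
	return best[0]
}

func main() {
	pending := map[string]int{"P1": 2, "P2": 0, "P3": 0}
	fmt.Println(leastLoaded(pending, []string{"P1", "P2", "P3"})) // P2
}
```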
-
-func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
-	// No such peer, or the peer was removed.
-	peer, ok := sc.peers[event.peerID]
-	if !ok || peer.state == peerStateRemoved {
-		return noOp, nil
-	}
-
-	// The peer may have just been removed due to errors, low speed or timeouts.
-	sc.removePeer(event.peerID)
-
-	return scPeerError{peerID: event.peerID,
-		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
-			event.peerID, peer.base, peer.height, event.height)}, nil
-}
-
-func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
-	if event.height != sc.height {
-		panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
-	}
-
-	err := sc.markProcessed(event.height)
-	if err != nil {
-		return scSchedulerFail{reason: err}, nil
-	}
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "processed all blocks"}, nil
-	}
-
-	return noOp, nil
-}
-
-// Handles an error from the processor. The processor has already cleaned the blocks from
-// the peers included in this event. Just attempt to remove the peers.
-func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
-	// The peers may have just been removed due to errors, low speed or timeouts.
-	sc.removePeer(event.firstPeerID)
-	if event.firstPeerID != event.secondPeerID {
-		sc.removePeer(event.secondPeerID)
-	}
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "error on last block"}, nil
-	}
-
-	return noOp, nil
-}
-
-func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
-	sc.ensurePeer(event.peerID)
-	return noOp, nil
-}
-
-func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
-	sc.removePeer(event.peerID)
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "removed peer"}, nil
-	}
-
-	// Return scPeerError so the peer (and all associated blocks) is removed from
-	// the processor.
-	return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
-}
-
-func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
-	// Check the behavior of the peer responsible for delivering the block at sc.height.
-	timeHeightAsked, ok := sc.pendingTime[sc.height]
-	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
-		// A request was sent to a peer for the block at sc.height but no response was received
-		// from that peer within sc.peerTimeout. Remove the peer. This ensures that a peer
-		// will be timed out even if it sends blocks at higher heights but prevents progress by
-		// not sending the block at the current height.
-		sc.removePeer(sc.pendingBlocks[sc.height])
-	}
-
-	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
-	if len(prunablePeers) == 0 {
-		return noOp, nil
-	}
-	for _, peerID := range prunablePeers {
-		sc.removePeer(peerID)
-	}
-
-	// If all blocks are processed we should finish.
- if sc.allBlocksProcessed() { - return scFinishedEv{reason: "after try prune"}, nil - } - - return scPeersPruned{peers: prunablePeers}, nil -} - -func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { - initHeight := event.state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = event.state.InitialHeight - } - sc.initHeight = initHeight - sc.height = initHeight - sc.lastAdvance = time.Now() - sc.addNewBlocks() - return noOp, nil -} - -func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { - if time.Since(sc.lastAdvance) > sc.syncTimeout { - return scFinishedEv{reason: "timeout, no advance"}, nil - } - - nextHeight := sc.nextHeightToSchedule() - if nextHeight == -1 { - return noOp, nil - } - - bestPeerID, err := sc.selectPeer(nextHeight) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil { - return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate - } - return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil - -} - -func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerRange(event.peerID, event.base, event.height) - if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil - } - return noOp, nil -} - -func (sc *scheduler) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - nextEvent, err := sc.handleResetState(event) - return nextEvent, err - case bcStatusResponse: - nextEvent, err := sc.handleStatusResponse(event) - return nextEvent, err - case bcBlockResponse: - nextEvent, err := sc.handleBlockResponse(event) - return nextEvent, err - case bcNoBlockResponse: - nextEvent, err := sc.handleNoBlockResponse(event) - return nextEvent, err - case rTrySchedule: - nextEvent, err := sc.handleTrySchedule(event) - return nextEvent, err - case bcAddNewPeer: - nextEvent, err := sc.handleAddNewPeer(event) - return nextEvent, err - case bcRemovePeer: - nextEvent, err := sc.handleRemovePeer(event) - return nextEvent, err - case rTryPrunePeer: - nextEvent, err := sc.handleTryPrunePeer(event) - return nextEvent, err - case pcBlockProcessed: - nextEvent, err := sc.handleBlockProcessed(event) - return nextEvent, err - case pcBlockVerificationFailure: - nextEvent, err := sc.handleBlockProcessError(event) - return nextEvent, err - default: - return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil - } -} diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go deleted file mode 100644 index 91fac3637..000000000 --- a/internal/blocksync/v2/scheduler_test.go +++ /dev/null @@ -1,2253 +0,0 @@ -package v2 - -import ( - "fmt" - "math" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -type scTestParams struct { - peers map[string]*scPeer - initHeight int64 - height int64 - allB []int64 - pending map[int64]types.NodeID - pendingTime map[int64]time.Time - received map[int64]types.NodeID - peerTimeout time.Duration - minRecvRate int64 - targetPending int - startTime time.Time - syncTimeout time.Duration -} - -func verifyScheduler(sc *scheduler) { - missing := 0 - if sc.maxHeight() >= sc.height { - missing = int(math.Min(float64(sc.targetPending), float64(sc.maxHeight()-sc.height+1))) - } - if len(sc.blockStates) != missing { - 
panic(fmt.Sprintf("scheduler block length %d different than target %d", len(sc.blockStates), missing)) - } -} - -func newTestScheduler(params scTestParams) *scheduler { - peers := make(map[types.NodeID]*scPeer) - var maxHeight int64 - - initHeight := params.initHeight - if initHeight == 0 { - initHeight = 1 - } - sc := newScheduler(initHeight, params.startTime) - if params.height != 0 { - sc.height = params.height - } - - for id, peer := range params.peers { - peer.peerID = types.NodeID(id) - peers[types.NodeID(id)] = peer - if maxHeight < peer.height { - maxHeight = peer.height - } - } - for _, h := range params.allB { - sc.blockStates[h] = blockStateNew - } - for h, pid := range params.pending { - sc.blockStates[h] = blockStatePending - sc.pendingBlocks[h] = pid - } - for h, tm := range params.pendingTime { - sc.pendingTime[h] = tm - } - for h, pid := range params.received { - sc.blockStates[h] = blockStateReceived - sc.receivedBlocks[h] = pid - } - - sc.peers = peers - sc.peerTimeout = params.peerTimeout - if params.syncTimeout == 0 { - sc.syncTimeout = 10 * time.Second - } else { - sc.syncTimeout = params.syncTimeout - } - - if params.targetPending == 0 { - sc.targetPending = 10 - } else { - sc.targetPending = params.targetPending - } - - sc.minRecvRate = params.minRecvRate - - verifyScheduler(sc) - - return sc -} - -func TestScInit(t *testing.T) { - var ( - initHeight int64 = 5 - sc = newScheduler(initHeight, time.Now()) - ) - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) -} - -func TestScMaxHeights(t *testing.T) { - - tests := []struct { - name string - sc scheduler - wantMax int64 - }{ - { - name: "no peers", - sc: scheduler{height: 11}, - wantMax: 10, - }, - { - name: "one ready peer", - sc: scheduler{ - height: 3, - peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}}, - }, - wantMax: 6, - }, - { - name: "ready and removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 4, - }, - { - name: "removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateRemoved}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 0, - }, - { - name: "new peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}, - }, - wantMax: 0, - }, - { - name: "mixed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 20, state: peerStateRemoved}, - "P4": {height: 22, state: peerStateReady}, - }, - }, - wantMax: 22, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // maxHeight() should not mutate the scheduler - wantSc := tt.sc - - resMax := tt.sc.maxHeight() - assert.Equal(t, tt.wantMax, resMax) - assert.Equal(t, wantSc, tt.sc) - }) - } -} - -func TestScEnsurePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - }{ - { - name: "add first peer", - fields: scTestParams{}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: 
map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add duplicate peer is fine", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "add duplicate peer with existing peer in Ready state is noop", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.ensurePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScTouchPeer(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - time time.Time - } - - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt to touch non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - wantErr: true, - }, - { - name: "attempt to touch peer in state New", - fields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - wantErr: true, - }, - { - name: "attempt to touch peer in state Removed", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - wantErr: true, - }, - { - name: "touch peer in state Ready", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, lastTouched: now}}}, - args: args{peerID: "P1", time: now.Add(3 * time.Second)}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {state: peerStateReady, lastTouched: now.Add(3 * time.Second)}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.touchPeer(tt.args.peerID, tt.args.time); (err != nil) != tt.wantErr { - t.Errorf("touchPeer() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScPrunablePeers(t *testing.T) { - now := time.Now() - - type args struct { - threshold time.Duration - time time.Time - minSpeed int64 - } - - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: scTestParams{peers: 
map[string]*scPeer{}}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{}, - }, - { - name: "mixed peers", - fields: scTestParams{peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90}, - }}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{"P4", "P5", "P6"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // peersSlowerThan should not mutate the scheduler - wantSc := sc - res := sc.prunablePeers(tt.args.threshold, tt.args.minSpeed, tt.args.time) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScRemovePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "remove non existing peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "remove single New peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}}}, - }, - { - name: "remove one of two New peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}, "P2": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}, "P2": {height: -1}}}, - }, - { - name: "remove one Ready peer, all peers removed", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateRemoved}}, - }, - }, - { - name: "attempt to remove already removed peer", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}}, - }, - { - name: "remove Ready peer with blocks requested", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: 
map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 3: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer from multiple peers set, with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateReady}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateRemoved}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{3: "P2"}, - received: map[int64]types.NodeID{4: "P2", 5: "P2"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.removePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScSetPeerRange(t *testing.T) { - - type args struct { - peerID types.NodeID - base int64 - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 2, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "increase height of removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - }, - { - name: "decrease height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 2}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}}, - wantErr: true, - }, - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - 
-			allB:  []int64{1, 2}},
-		args: args{peerID: "P1", height: 4},
-		wantFields: scTestParams{
-			peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-			allB:  []int64{1, 2, 3, 4}},
-	},
-	{
-		name: "noop height change of single peer",
-		fields: scTestParams{
-			peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-			allB:  []int64{1, 2, 3, 4}},
-		args: args{peerID: "P1", height: 4},
-		wantFields: scTestParams{
-			peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-			allB:  []int64{1, 2, 3, 4}},
-	},
-	{
-		name: "add peer with huge height 10**10 ",
-		fields: scTestParams{
-			peers:         map[string]*scPeer{"P2": {height: -1, state: peerStateNew}},
-			targetPending: 4,
-		},
-		args: args{peerID: "P2", height: 10000000000},
-		wantFields: scTestParams{
-			targetPending: 4,
-			peers:         map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}},
-			allB:          []int64{1, 2, 3, 4}},
-	},
-	{
-		name: "add peer with base > height should error",
-		fields: scTestParams{
-			peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-			allB:  []int64{1, 2, 3, 4}},
-		args: args{peerID: "P1", base: 6, height: 5},
-		wantFields: scTestParams{
-			peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-		wantErr: true,
-	},
-	{
-		name: "add peer with base == height is fine",
-		fields: scTestParams{
-			peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateNew}},
-			targetPending: 4,
-		},
-		args: args{peerID: "P1", base: 6, height: 6},
-		wantFields: scTestParams{
-			targetPending: 4,
-			peers:         map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}},
-			allB:          []int64{1, 2, 3, 4}},
-	},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers)
-		})
-	}
-}
-
-func TestScGetPeersWithHeight(t *testing.T) {
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantResult []types.NodeID
-	}{
-		{
-			name:       "no peers",
-			fields:     scTestParams{peers: map[string]*scPeer{}},
-			args:       args{height: 10},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name:       "only new peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
-			args:       args{height: 10},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name:       "only Removed peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			args:       args{height: 2},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "one Ready shorter peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 5},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "one Ready equal peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{"P1"},
-		},
-		{
-			name: "one Ready higher peer",
-			fields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P1": {height: 20, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{"P1"},
-		},
-		{
-			name: "one Ready higher peer at base",
-			fields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{"P1"},
-		},
-		{
-			name: "one Ready higher peer with higher base",
-			fields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "multiple mixed peers",
-			fields: scTestParams{
-				height: 8,
-				peers: map[string]*scPeer{
-					"P1": {height: -1, state: peerStateNew},
-					"P2": {height: 10, state: peerStateReady},
-					"P3": {height: 5, state: peerStateReady},
-					"P4": {height: 20, state: peerStateRemoved},
-					"P5": {height: 11, state: peerStateReady}},
-				allB: []int64{8, 9, 10, 11},
-			},
-			args:       args{height: 8},
-			wantResult: []types.NodeID{"P2", "P5"},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// getPeersWithHeight should not mutate the scheduler
-			wantSc := sc
-			res := sc.getPeersWithHeight(tt.args.height)
-			sort.Sort(PeerByID(res))
-			assert.Equal(t, tt.wantResult, res)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScMarkPending(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		peerID types.NodeID
-		height int64
-		tm     time.Time
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name: "attempt mark pending an unknown block above height",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			args: args{peerID: "P1", height: 3, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			wantErr: true,
-		},
-		{
-			name: "attempt mark pending an unknown block below base",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6}},
-			args: args{peerID: "P1", height: 3, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6}},
-			wantErr: true,
-		},
-		{
-			name: "attempt mark pending from non existing peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			args: args{peerID: "P2", height: 1, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			wantErr: true,
-		},
-		{
-			name: "mark pending from Removed peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			args: args{peerID: "P1", height: 1, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			wantErr: true,
-		},
-		{
-			name: "mark pending from New peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateNew},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			args: args{peerID: "P2", height: 2, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateNew},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			wantErr: true,
-		},
-		{
-			name: "mark pending from short peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 2, state: peerStateReady},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			args: args{peerID: "P2", height: 3, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 2, state: peerStateReady},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			wantErr: true,
-		},
-		{
-			name: "mark pending all good",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1"},
-				pendingTime: map[int64]time.Time{1: now},
-			},
-			args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			if err := sc.markPending(tt.args.peerID, tt.args.height, tt.args.tm); (err != nil) != tt.wantErr {
-				t.Errorf("markPending() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScMarkReceived(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		peerID types.NodeID
-		height int64
-		size   int64
-		tm     time.Time
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name: "received from non existing peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			args: args{peerID: "P2", height: 1, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			wantErr: true,
-		},
-		{
-			name: "received from removed peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			args: args{peerID: "P1", height: 1, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			wantErr: true,
-		},
-		{
-			name: "received from unsolicited peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateReady},
-				},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateReady},
-				},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received but blockRequest not sent",
-			fields: scTestParams{
-				peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received with bad timestamp",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received all good",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1"},
-				pendingTime: map[int64]time.Time{1: now},
-				received:    map[int64]types.NodeID{2: "P1"},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			if err := sc.markReceived(
-				tt.args.peerID,
-				tt.args.height,
-				tt.args.size,
-				now.Add(time.Second)); (err != nil) != tt.wantErr {
-				t.Errorf("markReceived() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScMarkProcessed(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name: "processed an unreceived block",
-			fields: scTestParams{
-				height:        2,
-				peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:          []int64{2},
-				pending:       map[int64]types.NodeID{2: "P1"},
-				pendingTime:   map[int64]time.Time{2: now},
-				targetPending: 1,
-			},
-			args: args{height: 2},
-			wantFields: scTestParams{
-				height:        3,
-				peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:          []int64{3},
-				targetPending: 1,
-			},
-		},
-		{
-			name: "mark processed success",
-			fields: scTestParams{
-				height:      1,
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now},
-				received:    map[int64]types.NodeID{1: "P1"}},
-			args: args{height: 1},
-			wantFields: scTestParams{
-				height:      2,
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{2},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now}},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			oldBlockState := sc.getStateAtHeight(tt.args.height)
-			if err := sc.markProcessed(tt.args.height); (err != nil) != tt.wantErr {
-				t.Errorf("markProcessed() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			if tt.wantErr {
-				assert.Equal(t, oldBlockState, sc.getStateAtHeight(tt.args.height))
-			} else {
-				assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(tt.args.height))
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScResetState(t *testing.T) {
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		state      state.State
-		wantFields scTestParams
-	}{
-		{
-			name: "updates height and initHeight",
-			fields: scTestParams{
-				height:     0,
-				initHeight: 0,
-			},
-			state: state.State{LastBlockHeight: 7},
-			wantFields: scTestParams{
-				height:     8,
-				initHeight: 8,
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			e, err := sc.handleResetState(bcResetState{state: tt.state})
-			require.NoError(t, err)
-			assert.Equal(t, e, noOp)
-			wantSc := newTestScheduler(tt.wantFields)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScAllBlocksProcessed(t *testing.T) {
-	now := time.Now()
-
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		wantResult bool
-	}{
-		{
-			name:       "no blocks, no peers",
-			fields:     scTestParams{},
-			wantResult: false,
-		},
-		{
-			name: "only New blocks",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Pending blocks",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Received blocks",
-			fields: scTestParams{
-				peers:    map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:     []int64{1, 2, 3, 4},
-				received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Processed blocks plus highest is received",
-			fields: scTestParams{
-				height: 4,
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady}},
-				allB:     []int64{4},
-				received: map[int64]types.NodeID{4: "P1"},
-			},
-			wantResult: true,
-		},
-		{
-			name: "mixed block states",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{2: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{2: now, 4: now},
-			},
-			wantResult: false,
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// allBlocksProcessed() should not mutate the scheduler
-			wantSc := sc
-			res := sc.allBlocksProcessed()
-			assert.Equal(t, tt.wantResult, res)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScNextHeightToSchedule(t *testing.T) {
-	now := time.Now()
-
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		wantHeight int64
-	}{
-		{
-			name:       "no blocks",
-			fields:     scTestParams{initHeight: 11, height: 11},
-			wantHeight: -1,
-		},
-		{
-			name: "only New blocks",
-			fields: scTestParams{
-				initHeight: 3,
-				peers:      map[string]*scPeer{"P1": {height: 6, state: peerStateReady}},
-				allB:       []int64{3, 4, 5, 6},
-			},
-			wantHeight: 3,
-		},
-		{
-			name: "only Pending blocks",
-			fields: scTestParams{
-				initHeight:  1,
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
-			},
-			wantHeight: -1,
-		},
-		{
-			name: "only Received blocks",
-			fields: scTestParams{
-				initHeight: 1,
-				peers:      map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:       []int64{1, 2, 3, 4},
-				received:   map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-			},
-			wantHeight: -1,
-		},
-		{
-			name: "only Processed blocks",
-			fields: scTestParams{
-				initHeight: 1,
-				peers:      map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:       []int64{1, 2, 3, 4},
-			},
-			wantHeight: 1,
-		},
-		{
-			name: "mixed block states",
-			fields: scTestParams{
-				initHeight:  1,
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now},
-			},
-			wantHeight: 1,
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// nextHeightToSchedule() should not mutate the scheduler
-			wantSc := sc
-
-			resMin := sc.nextHeightToSchedule()
-			assert.Equal(t, tt.wantHeight, resMin)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScSelectPeer(t *testing.T) {
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantResult types.NodeID
-		wantError  bool
-	}{
-		{
-			name:       "no peers",
-			fields:     scTestParams{peers: map[string]*scPeer{}},
-			args:       args{height: 10},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name:       "only new peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
-			args:       args{height: 10},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name:       "only Removed peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			args:       args{height: 2},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "one Ready shorter peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 5},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "one Ready equal peer",
-			fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: "P1",
-		},
-		{
-			name: "one Ready higher peer",
-			fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5, 6},
-			},
-			args:       args{height: 4},
-			wantResult: "P1",
-		},
-		{
-			name: "one Ready higher peer with higher base",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6},
-			},
-			args:       args{height: 3},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "many Ready higher peers with different number of pending requests",
-			fields: scTestParams{
-				height: 4,
-				peers: map[string]*scPeer{
-					"P1": {height: 8, state: peerStateReady},
-					"P2": {height: 9, state: peerStateReady}},
-				allB: []int64{4, 5, 6, 7, 8, 9},
-				pending: map[int64]types.NodeID{
-					4: "P1", 6: "P1",
-					5: "P2",
-				},
-			},
-			args:       args{height: 4},
-			wantResult: "P2",
-		},
-		{
-			name: "many Ready higher peers with same number of pending requests",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P2": {height: 20, state: peerStateReady},
-					"P1": {height: 15, state: peerStateReady},
-					"P3": {height: 15, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-				pending: map[int64]types.NodeID{
-					1: "P1", 2: "P1",
-					3: "P3", 4: "P3",
-					5: "P2", 6: "P2",
-				},
-			},
-			args:       args{height: 7},
-			wantResult: "P1",
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// selectPeer should not mutate the scheduler
-			wantSc := sc
-			res, err := sc.selectPeer(tt.args.height)
-			assert.Equal(t, tt.wantResult, res)
-			assert.Equal(t, tt.wantError, err != nil)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-// makeScBlock makes an empty block.
-func makeScBlock(height int64) *types.Block {
-	return &types.Block{Header: types.Header{Height: height}}
-}
-
-// used in place of assert.Equal(t, want, actual) to avoid failures due to
-// scheduler.lastAdvanced timestamp inequalities.
-func checkSameScheduler(t *testing.T, want *scheduler, actual *scheduler) {
-	assert.Equal(t, want.initHeight, actual.initHeight)
-	assert.Equal(t, want.height, actual.height)
-	assert.Equal(t, want.peers, actual.peers)
-	assert.Equal(t, want.blockStates, actual.blockStates)
-	assert.Equal(t, want.pendingBlocks, actual.pendingBlocks)
-	assert.Equal(t, want.pendingTime, actual.pendingTime)
-	assert.Equal(t, want.blockStates, actual.blockStates)
-	assert.Equal(t, want.receivedBlocks, actual.receivedBlocks)
-	assert.Equal(t, want.blockStates, actual.blockStates)
-}
-
-// checkScResults checks scheduler handler test results
-func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, event Event) {
-	if (err != nil) != wantErr {
-		t.Errorf("error = %v, wantErr %v", err, wantErr)
-		return
-	}
-	if !assert.IsType(t, wantEvent, event) {
-		t.Log(fmt.Sprintf("Wrong type received, got: %v", event))
-	}
-	switch wantEvent := wantEvent.(type) {
-	case scPeerError:
-		assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID)
-		assert.Equal(t, wantEvent.reason != nil, event.(scPeerError).reason != nil)
-	case scBlockReceived:
-		assert.Equal(t, wantEvent.peerID, event.(scBlockReceived).peerID)
-		assert.Equal(t, wantEvent.block, event.(scBlockReceived).block)
-	case scSchedulerFail:
-		assert.Equal(t, wantEvent.reason != nil, event.(scSchedulerFail).reason != nil)
-	}
-}
-
-func TestScHandleBlockResponse(t *testing.T) {
-	now := time.Now()
-	block6FromP1 := bcBlockResponse{
-		time:   now.Add(time.Millisecond),
-		peerID: types.NodeID("P1"),
-		size:   100,
-		block:  makeScBlock(6),
-	}
-
-	type args struct {
-		event bcBlockResponse
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "empty scheduler",
-			fields:    scTestParams{},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name:      "block from removed peer",
-			fields:    scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "block we haven't asked for",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6, 7, 8}},
-			args:      args{event: block6FromP1},
-			wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-		},
-		{
-			name: "block from wrong peer",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P2"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "block with bad timestamp",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
-			},
-			args:      args{event: block6FromP1},
-			wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-		},
-		{
-			name: "good block, accept",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: block6FromP1},
-			wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleBlockResponse(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleNoBlockResponse(t *testing.T) {
-	now := time.Now()
-	noBlock6FromP1 := bcNoBlockResponse{
-		time:   now.Add(time.Millisecond),
-		peerID: types.NodeID("P1"),
-		height: 6,
-	}
-
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		wantEvent  Event
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name:       "empty scheduler",
-			fields:     scTestParams{},
-			wantEvent:  noOpEvent{},
-			wantFields: scTestParams{},
-		},
-		{
-			name:       "noBlock from removed peer",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-			wantEvent:  noOpEvent{},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-		},
-		{
-			name: "for block we haven't asked for",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6, 7, 8}},
-			wantEvent:  scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-		},
-		{
-			name: "noBlock from peer we don't have",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P2"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			wantEvent: noOpEvent{},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P2"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-		},
-		{
-			name: "noBlock from existing peer",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			wantEvent:  scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleNoBlockResponse(noBlock6FromP1)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScHandleBlockProcessed(t *testing.T) {
-	now := time.Now()
-	processed6FromP1 := pcBlockProcessed{
-		peerID: types.NodeID("P1"),
-		height: 6,
-	}
-
-	type args struct {
-		event pcBlockProcessed
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "empty scheduler",
-			fields:    scTestParams{height: 6},
-			args:      args{event: processed6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "processed block we don't have",
-			fields: scTestParams{
-				initHeight:  6,
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "processed block ok, we processed all blocks",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
-				allB:       []int64{6, 7},
-				received:   map[int64]types.NodeID{6: "P1", 7: "P1"},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "processed block ok, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:       []int64{6, 7, 8},
-				pending:    map[int64]types.NodeID{7: "P1", 8: "P1"},
-				received:   map[int64]types.NodeID{6: "P1"},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: noOpEvent{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleBlockProcessed(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleBlockVerificationFailure(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		event pcBlockVerificationFailure
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "empty scheduler",
-			fields:    scTestParams{},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block we don't have, single peer is still removed",
-			fields: scTestParams{
-				initHeight:  6,
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "failed block we don't have, one of two peers are removed",
-			fields: scTestParams{
-				initHeight:  6,
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block, all blocks are processed after removal",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
-				allB:       []int64{6, 7},
-				received:   map[int64]types.NodeID{6: "P1", 7: "P1"},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "failed block, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 5,
-				peers:      map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
-				allB:       []int64{5, 6, 7, 8},
-				pending:    map[int64]types.NodeID{7: "P1", 8: "P1"},
-				received:   map[int64]types.NodeID{5: "P1", 6: "P1"},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 5,
-				peers: map[string]*scPeer{
-					"P1": {height: 8, state: peerStateReady},
-					"P2": {height: 8, state: peerStateReady},
-					"P3": {height: 8, state: peerStateReady},
-				},
-				allB:     []int64{5, 6, 7, 8},
-				pending:  map[int64]types.NodeID{7: "P1", 8: "P1"},
-				received: map[int64]types.NodeID{5: "P1", 6: "P1"},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
-			wantEvent: noOpEvent{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleBlockProcessError(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleAddNewPeer(t *testing.T) {
-	addP1 := bcAddNewPeer{
-		peerID: types.NodeID("P1"),
-	}
-	type args struct {
-		event bcAddNewPeer
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "add P1 to empty scheduler",
-			fields:    scTestParams{},
-			args:      args{event: addP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "add duplicate peer",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:       []int64{6, 7, 8},
-			},
-			args:      args{event: addP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "add P1 to non empty scheduler",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
-				allB:       []int64{6, 7, 8},
-			},
-			args:      args{event: addP1},
-			wantEvent: noOpEvent{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleAddNewPeer(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleTryPrunePeer(t *testing.T) {
-	now := time.Now()
-
-	pruneEv := rTryPrunePeer{
-		time: now.Add(time.Second + time.Millisecond),
-	}
-	type args struct {
-		event rTryPrunePeer
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "no peers",
-			fields:    scTestParams{},
-			args:      args{event: pruneEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "no prunable peers",
-			fields: scTestParams{
-				minRecvRate: 100,
-				peers: map[string]*scPeer{
-					// X - removed, active, fast
-					"P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101},
-					// X - ready, active, fast
-					"P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101},
-					// X - removed, active, equal
-					"P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}},
-				peerTimeout: time.Second,
-			},
-			args:      args{event: pruneEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "mixed peers",
-			fields: scTestParams{
-				minRecvRate: 100,
-				peers: map[string]*scPeer{
-					// X - removed, active, fast
-					"P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5},
-					// X - ready, active, fast
-					"P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5},
-					// X - removed, active, equal
-					"P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5},
-					// V - ready, inactive, equal
-					"P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7},
-					// V - ready, inactive, slow
-					"P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7},
-					// V - ready, active, slow
-					"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7},
-				},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7},
-				peerTimeout: time.Second},
-			args:      args{event: pruneEv},
-			wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
-		},
-		{
-			name: "mixed peers, finish after pruning",
-			fields: scTestParams{
-				minRecvRate: 100,
-				height:      6,
-				peers: map[string]*scPeer{
-					// X - removed, active, fast
-					"P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5},
-					// X - ready, active, fast
-					"P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5},
-					// X - removed, active, equal
-					"P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5},
-					// V - ready, inactive, equal
-					"P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7},
-					// V - ready, inactive, slow
-					"P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7},
-					// V - ready, active, slow
-					"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7},
-				},
-				allB:        []int64{6, 7},
-				peerTimeout: time.Second},
-			args:      args{event: pruneEv},
-			wantEvent: scFinishedEv{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleTryPrunePeer(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleTrySchedule(t *testing.T) {
-	now := time.Now()
-	tryEv := rTrySchedule{
-		time: now.Add(time.Second + time.Millisecond),
-	}
-
-	type args struct {
-		event rTrySchedule
-	}
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "no peers",
-			fields:    scTestParams{startTime: now, peers: map[string]*scPeer{}},
-			args:      args{event: tryEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name:      "only new peers",
-			fields:    scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
-			args:      args{event: tryEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name:      "only Removed peers",
-			fields:    scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			args:      args{event: tryEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "one Ready shorter peer",
-			fields: scTestParams{
-				startTime: now,
-				height:    6,
-				peers:     map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}},
-			args:      args{event: tryEv},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "one Ready equal peer",
-			fields: scTestParams{
-				startTime: now,
-				peers:     map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:      []int64{1, 2, 3, 4}},
-			args:      args{event: tryEv},
-			wantEvent: scBlockRequest{peerID: "P1", height: 1},
-		},
-		{
-			name: "many Ready higher peers with different number of pending requests",
-			fields: scTestParams{
-				startTime: now,
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 5, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5},
-				pending: map[int64]types.NodeID{
-					1: "P1", 2: "P1",
-					3: "P2",
-				},
-			},
-			args:      args{event: tryEv},
-			wantEvent: scBlockRequest{peerID: "P2", height: 4},
-		},
-
-		{
-			name: "many Ready higher peers with same number of pending requests",
-			fields: scTestParams{
-				startTime: now,
-				peers: map[string]*scPeer{
-					"P2": {height: 8, state: peerStateReady},
-					"P1": {height: 8, state: peerStateReady},
-					"P3": {height: 8, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending: map[int64]types.NodeID{
-					1: "P1", 2: "P1",
-					3: "P3", 4: "P3",
-					5: "P2", 6: "P2",
-				},
-			},
-			args:      args{event: tryEv},
-			wantEvent: scBlockRequest{peerID: "P1", height: 7},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleTrySchedule(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleStatusResponse(t *testing.T) {
-	now := time.Now()
-	statusRespP1Ev := bcStatusResponse{
-		time:   now.Add(time.Second + time.Millisecond),
-		peerID: "P1",
-		height: 6,
-	}
-
-	type args struct {
-		event bcStatusResponse
-	}
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name: "change height of non existing peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P2": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2},
-			},
-			args:      args{event: statusRespP1Ev},
-			wantEvent: noOpEvent{},
-		},
-
-		{
-			name:      "increase height of removed peer",
-			fields:    scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			args:      args{event: statusRespP1Ev},
-			wantEvent: noOpEvent{},
-		},
-
-		{
-			name: "decrease height of single peer",
-			fields: scTestParams{
-				height: 5,
-				peers:  map[string]*scPeer{"P1": {height: 10, state: peerStateReady}},
-				allB:   []int64{5, 6, 7, 8, 9, 10},
-			},
-			args:      args{event: statusRespP1Ev},
-			wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-		},
-
-		{
-			name: "increase height of single peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			args:      args{event: statusRespP1Ev},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "noop height change of single peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6}},
-			args:      args{event: statusRespP1Ev},
-			wantEvent: noOpEvent{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleStatusResponse(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandle(t *testing.T) {
-	now := time.Now()
-
-	type unknownEv struct {
-		priorityNormal
-	}
-
-	block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3)
-
-	t0 := time.Now()
-	tick := make([]time.Time, 100)
-	for i := range tick {
-		tick[i] = t0.Add(time.Duration(i) * time.Millisecond)
-	}
-
-	type args struct {
-		event Event
-	}
-	type scStep struct {
-		currentSc *scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-		wantSc    *scTestParams
-	}
-	tests := []struct {
-		name  string
-		steps []scStep
-	}{
-		{
-			name: "unknown event",
-			steps: []scStep{
-				{ // add P1
-					currentSc: &scTestParams{},
-					args:      args{event: unknownEv{}},
-					wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")},
-					wantSc:    &scTestParams{},
-				},
-			},
-		},
-		{
-			name: "single peer, sync 3 blocks",
-			steps: []scStep{
-				{ // add P1
-					currentSc: &scTestParams{startTime: now, peers: map[string]*scPeer{}, height: 1},
-					args:      args{event: bcAddNewPeer{peerID: "P1"}},
-					wantEvent: noOpEvent{},
-					wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{
-						"P1": {base: -1, height: -1, state: peerStateNew}}, height: 1},
-				},
-				{ // set height of P1
-					args:      args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}},
-					wantEvent: noOpEvent{},
-					wantSc: &scTestParams{
-						startTime: now,
-						peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
-						allB:      []int64{1, 2, 3},
-						height:    1,
-					},
-				},
-				{ // schedule block 1
-					args:      args{event: rTrySchedule{time: tick[1]}},
-					wantEvent: scBlockRequest{peerID: "P1", height: 1},
-					wantSc: &scTestParams{
-						startTime:   now,
-						peers:       map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
-						allB:        []int64{1, 2, 3},
-						pending:     map[int64]types.NodeID{1: "P1"},
-						pendingTime: map[int64]time.Time{1: tick[1]},
-						height:      1,
-					},
-				},
-				{ // schedule block 2
-					args:      args{event: rTrySchedule{time: tick[2]}},
-					wantEvent: scBlockRequest{peerID: "P1", height: 2},
-					wantSc: &scTestParams{
-						startTime:   now,
-						peers:       map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
-						allB:        []int64{1, 2, 3},
-						pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-						pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
-						height:      1,
-					},
-				},
-				{ // schedule block 3
-					args:      args{event: rTrySchedule{time: tick[3]}},
-					wantEvent: scBlockRequest{peerID: "P1", height: 3},
-					wantSc: &scTestParams{
-						startTime:   now,
-						peers:       map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
-						allB:        []int64{1, 2, 3},
-						pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
-						pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
-						height:      1,
-					},
-				},
-				{ // block response 1
-					args:      args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}},
-					wantEvent: scBlockReceived{peerID: "P1", block: block1},
-					wantSc: &scTestParams{
-						startTime:   now,
-						peers:       map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
-						allB:        []int64{1, 2, 3},
-						pending:     map[int64]types.NodeID{2: "P1", 3: "P1"},
-						pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
-						received:    map[int64]types.NodeID{1: "P1"},
-						height:      1,
-					},
-				},
-				{ // block response 2
-					args:      args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}},
-					wantEvent: scBlockReceived{peerID: "P1", block: block2},
-					wantSc: &scTestParams{
-						startTime:   now,
-						peers:       map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
-						allB:        []int64{1, 2, 3},
-						pending:     map[int64]types.NodeID{3: "P1"},
-						pendingTime: map[int64]time.Time{3: tick[3]},
-						received:    map[int64]types.NodeID{1: "P1", 2: "P1"},
-						height:      1,
-					},
-				},
-				{ // block response 3
-					args:      args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}},
-					wantEvent: scBlockReceived{peerID: "P1", block: block3},
-					wantSc: &scTestParams{
-						startTime: now,
-						peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
-						allB:      []int64{1, 2, 3},
-						received:  map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
-						height:    1,
-					},
-				},
-				{ // processed block 1
-					args:      args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
-					wantEvent: noOpEvent{},
-					wantSc: &scTestParams{
-						startTime: now,
-						peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
-						allB:      []int64{2, 3},
-						received:  map[int64]types.NodeID{2: "P1", 3: "P1"},
-						height:    2,
-					},
-				},
-				{ // processed block 2
-					args:      args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
-					wantEvent: scFinishedEv{},
-					wantSc: &scTestParams{
-						startTime: now,
-						peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
-						allB:      []int64{3},
-						received:  map[int64]types.NodeID{3: "P1"},
-						height:    3,
-					},
-				},
-			},
-		},
-		{
-			name: "block verification failure",
-			steps: []scStep{
-				{ // failure processing block 1
-					currentSc: &scTestParams{
-						startTime: now,
-						peers: map[string]*scPeer{
-							"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
-							"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
-						allB:     []int64{1, 2, 3, 4},
-						received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
-						height:   1,
-					},
-					args:      args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
-					wantEvent: noOpEvent{},
-					wantSc: &scTestParams{
-						startTime: now,
-						peers: map[string]*scPeer{
-							"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
-							"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
-						allB:     []int64{1, 2, 3},
-						received: map[int64]types.NodeID{},
-						height:   1,
-					},
-				},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			var sc *scheduler
-			for i, step := range tt.steps {
-				// First step must always initialize the currentState as state.
-				if step.currentSc != nil {
-					sc = newTestScheduler(*step.currentSc)
-				}
-				if sc == nil {
-					panic("Bad (initial?) step")
-				}
-
-				nextEvent, err := sc.handle(step.args.event)
-				wantSc := newTestScheduler(*step.wantSc)
-
-				t.Logf("step %d(%v): %s", i, step.args.event, sc)
-				checkSameScheduler(t, wantSc, sc)
-
-				checkScResults(t, step.wantErr, err, step.wantEvent, nextEvent)
-
-				// Next step may use the wantedState as their currentState.
-				sc = newTestScheduler(*step.wantSc)
-			}
-		})
-	}
-}
diff --git a/internal/blocksync/v2/types.go b/internal/blocksync/v2/types.go
deleted file mode 100644
index 7a73728e4..000000000
--- a/internal/blocksync/v2/types.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package v2
-
-import (
-	"github.com/Workiva/go-datastructures/queue"
-)
-
-// Event is the type that can be added to the priority queue.
-type Event queue.Item
-
-type priority interface {
-	Compare(other queue.Item) int
-	Priority() int
-}
-
-type priorityLow struct{}
-type priorityNormal struct{}
-type priorityHigh struct{}
-
-func (p priorityLow) Priority() int {
-	return 1
-}
-
-func (p priorityNormal) Priority() int {
-	return 2
-}
-
-func (p priorityHigh) Priority() int {
-	return 3
-}
-
-func (p priorityLow) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-func (p priorityNormal) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-func (p priorityHigh) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-type noOpEvent struct {
-	priorityLow
-}
-
-var noOp = noOpEvent{}
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go
index 28f630f5f..70555d440 100644
--- a/internal/consensus/byzantine_test.go
+++ b/internal/consensus/byzantine_test.go
@@ -11,25 +11,30 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	dbm "github.com/tendermint/tm-db"
+
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/eventbus"
 	"github.com/tendermint/tendermint/internal/evidence"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
+	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/p2p"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tm-db"
 )
 
 // Byzantine node sends two different prevotes (nil and blockID) to the same
 // validator.
 func TestByzantinePrevoteEquivocation(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	config := configSetup(t)
 
 	nValidators := 4
@@ -51,7 +56,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 			require.NoError(t, err)
 			require.NoError(t, stateStore.Save(state))
 
-			thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+			thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+			require.NoError(t, err)
+
 			defer os.RemoveAll(thisConfig.RootDir)
 
 			ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@@ -63,13 +70,17 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 			blockStore := store.NewBlockStore(blockDB)
 
 			// one for mempool, one for consensus
-			mtx := new(tmsync.RWMutex)
-			proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
-			proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
+			mtx := new(tmsync.Mutex)
+			proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
+			proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
 			// Make Mempool
-			mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
-			mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
+			mempool := mempool.NewTxMempool(
+				log.TestingLogger().With("module", "mempool"),
+				thisConfig.Mempool,
+				proxyAppConnMem,
+				0,
+			)
 			if thisConfig.Consensus.WaitForTxs() {
 				mempool.EnableTxsAvailable()
 			}
@@ -81,26 +92,23 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 			// Make State
 			blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-			cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
-			cs.SetLogger(cs.Logger)
+			cs := NewState(logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
 			// set private validator
 			pv := privVals[i]
 			cs.SetPrivValidator(pv)
 
-			eventBus := types.NewEventBus()
-			eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-			err = eventBus.Start()
+			eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
+			err = eventBus.Start(ctx)
 			require.NoError(t, err)
 			cs.SetEventBus(eventBus)
 
 			cs.SetTimeoutTicker(tickerFunc())
-			cs.SetLogger(logger)
 
 			states[i] = cs
 		}()
 	}
 
-	rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock
+	rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock
 
 	var bzNodeID types.NodeID
@@ -208,7 +216,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
 		proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID)
 		p := proposal.ToProto()
-		if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil {
+		if err := lazyNodeState.privValidator.SignProposal(ctx, lazyNodeState.state.ChainID, p); err == nil {
 			proposal.Signature = p.Signature
 
 			// send proposal and block parts on internal msg queue
@@ -226,31 +234,34 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	for _, reactor := range rts.reactors {
 		state := reactor.state.GetState()
-		reactor.SwitchToConsensus(state, false)
+		reactor.SwitchToConsensus(ctx, state, false)
 	}
 
 	// Evidence should be submitted and committed at the third height but
 	// we will check the first six just in case
 	evidenceFromEachValidator := make([]types.Evidence, nValidators)
 
-	wg := new(sync.WaitGroup)
+	var wg sync.WaitGroup
 	i := 0
 	for _, sub := range rts.subs {
 		wg.Add(1)
 
-		go func(j int, s types.Subscription) {
types.Subscription) { + go func(j int, s eventbus.Subscription) { defer wg.Done() for { - select { - case msg := <-s.Out(): - require.NotNil(t, msg) - block := msg.Data().(types.EventDataNewBlock).Block - if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] - return - } - case <-s.Canceled(): - require.Fail(t, "subscription failed for %d", j) + if ctx.Err() != nil { + return + } + + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + require.NotNil(t, msg) + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence.Evidence[0] return } } @@ -261,7 +272,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { wg.Wait() - pubkey, err := bzNodeState.privValidator.GetPubKey(context.Background()) + pubkey, err := bzNodeState.privValidator.GetPubKey(ctx) require.NoError(t, err) for idx, ev := range evidenceFromEachValidator { @@ -307,7 +318,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // eventBus.SetLogger(logger.With("module", "events", "validator", i)) // var err error - // blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + // blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock) // require.NoError(t, err) // conR := NewReactor(states[i], true) // so we don't start the consensus states diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 24e910443..754a2e697 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -5,26 +5,26 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" + "path" "path/filepath" "sync" "testing" "time" "github.com/stretchr/testify/require" - - "path" - dbm "github.com/tendermint/tm-db" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/mempool" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" @@ -33,8 +33,6 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -49,24 +47,30 @@ const ( // test. 
type cleanupFunc func() -func configSetup(t *testing.T) *cfg.Config { +func configSetup(t *testing.T) *config.Config { t.Helper() - config := ResetConfig("consensus_reactor_test") + cfg, err := ResetConfig("consensus_reactor_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - consensusReplayConfig := ResetConfig("consensus_replay_test") - configStateTest := ResetConfig("consensus_state_test") - configMempoolTest := ResetConfig("consensus_mempool_test") - configByzantineTest := ResetConfig("consensus_byzantine_test") + consensusReplayConfig, err := ResetConfig("consensus_replay_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) }) - t.Cleanup(func() { - os.RemoveAll(config.RootDir) - os.RemoveAll(consensusReplayConfig.RootDir) - os.RemoveAll(configStateTest.RootDir) - os.RemoveAll(configMempoolTest.RootDir) - os.RemoveAll(configByzantineTest.RootDir) - }) - return config + configStateTest, err := ResetConfig("consensus_state_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) }) + + configMempoolTest, err := ResetConfig("consensus_mempool_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) }) + + configByzantineTest, err := ResetConfig("consensus_byzantine_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) }) + + return cfg } func ensureDir(t *testing.T, dir string, mode os.FileMode) { @@ -76,8 +80,8 @@ func ensureDir(t *testing.T, dir string, mode os.FileMode) { } } -func ResetConfig(name string) *cfg.Config { - return cfg.ResetTestRoot(name) +func ResetConfig(name string) (*config.Config, error) { + return config.ResetTestRoot(name) } //------------------------------------------------------------------------------- @@ -105,11 +109,12 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida } func (vs *validatorStub) signVote( + ctx context.Context, voteType tmproto.SignedMsgType, chainID string, blockID types.BlockID) (*types.Vote, error) { - pubKey, err := vs.PrivValidator.GetPubKey(context.Background()) + pubKey, err := vs.PrivValidator.GetPubKey(ctx) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -124,7 +129,7 @@ func (vs *validatorStub) signVote( BlockID: blockID, } v := vote.ToProto() - if err := vs.PrivValidator.SignVote(context.Background(), chainID, v); err != nil { + if err := vs.PrivValidator.SignVote(ctx, chainID, v); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } @@ -142,12 +147,14 @@ func (vs *validatorStub) signVote( // Sign vote for type/hash/header func signVote( + ctx context.Context, vs *validatorStub, voteType tmproto.SignedMsgType, chainID string, blockID types.BlockID) *types.Vote { - v, err := vs.signVote(voteType, chainID, blockID) + v, err := vs.signVote(ctx, voteType, chainID, blockID) + if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } @@ -158,13 +165,14 @@ func signVote( } func signVotes( + ctx context.Context, voteType tmproto.SignedMsgType, chainID string, blockID types.BlockID, vss ...*validatorStub) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = signVote(vs, voteType, chainID, blockID) + votes[i] = signVote(ctx, vs, voteType, chainID, blockID) } return votes } @@ -188,11 +196,11 @@ func (vss ValidatorStubsByPower) Len() int { } func (vss ValidatorStubsByPower) Less(i, j int) bool { - vssi, err := 
vss[i].GetPubKey(context.Background()) + vssi, err := vss[i].GetPubKey(context.TODO()) if err != nil { panic(err) } - vssj, err := vss[j].GetPubKey(context.Background()) + vssj, err := vss[j].GetPubKey(context.TODO()) if err != nil { panic(err) } @@ -214,13 +222,14 @@ func (vss ValidatorStubsByPower) Swap(i, j int) { //------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *State, height int64, round int32) { +func startTestRound(ctx context.Context, cs *State, height int64, round int32) { cs.enterNewRound(height, round) - cs.startRoutines(0) + cs.startRoutines(ctx, 0) } // Create proposal block from cs1 but sign it with vs. func decideProposal( + ctx context.Context, t *testing.T, cs1 *State, vs *validatorStub, @@ -241,7 +250,7 @@ func decideProposal( polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal = types.NewProposal(height, round, polRound, propBlockID) p := proposal.ToProto() - if err := vs.SignProposal(context.Background(), chainID, p); err != nil { + if err := vs.SignProposal(ctx, chainID, p); err != nil { t.Fatalf("error signing proposal: %s", err) } @@ -257,20 +266,20 @@ func addVotes(to *State, votes ...*types.Vote) { } func signAddVotes( + ctx context.Context, to *State, voteType tmproto.SignedMsgType, chainID string, blockID types.BlockID, vss ...*validatorStub, ) { - votes := signVotes(voteType, chainID, blockID, vss...) - addVotes(to, votes...) + addVotes(to, signVotes(ctx, voteType, chainID, blockID, vss...)...) } -func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { +func validatePrevote(ctx context.Context, t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { t.Helper() prevotes := cs.Votes.Prevotes(round) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pubKey.Address() var vote *types.Vote @@ -288,10 +297,10 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu } } -func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { +func validateLastPrecommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { t.Helper() votes := cs.LastCommit - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() var vote *types.Vote @@ -304,6 +313,7 @@ func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, bloc } func validatePrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -314,7 +324,7 @@ func validatePrecommit( ) { t.Helper() precommits := cs.Votes.Precommits(thisRound) - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() var vote *types.Vote @@ -353,6 +363,7 @@ func validatePrecommit( } func validatePrevoteAndPrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -363,64 +374,82 @@ func validatePrevoteAndPrecommit( ) { t.Helper() // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) + validatePrevote(ctx, t, cs, thisRound, privVal, votedBlockHash) // verify precommit cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() + defer cs.mtx.Unlock() 
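Editor's note: the switch to context.TODO() in ValidatorStubsByPower.Less is forced by sort.Interface, whose Less has a fixed signature and cannot receive the caller's context; TODO marks that plumbing gap explicitly. A self-contained sketch of the constraint (types are stand-ins):

package sketch

import (
	"context"
	"sort"
)

type keyed interface {
	Key(ctx context.Context) (string, error)
}

type byKey []keyed

func (s byKey) Len() int      { return len(s) }
func (s byKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less cannot take a context because sort.Interface fixes its
// signature, so context.TODO() flags the unplumbed call sites.
func (s byKey) Less(i, j int) bool {
	ki, err := s[i].Key(context.TODO())
	if err != nil {
		panic(err)
	}
	kj, err := s[j].Key(context.TODO())
	if err != nil {
		panic(err)
	}
	return ki < kj
}

var _ sort.Interface = byKey(nil)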
+ validatePrecommit(ctx, t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) } -func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) - } - ch := make(chan tmpubsub.Message) - go func() { - for msg := range votesSub.Out() { - vote := msg.Data().(types.EventDataVote) - // we only fire for our own votes - if bytes.Equal(addr, vote.Vote.ValidatorAddress) { - ch <- msg - } +func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { + t.Helper() + + ch := make(chan tmpubsub.Message, 1) + if err := cs.eventBus.Observe(ctx, func(msg tmpubsub.Message) error { + vote := msg.Data().(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(addr, vote.Vote.ValidatorAddress) { + ch <- msg } - }() + return nil + }, types.EventQueryVote); err != nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } return ch } //------------------------------------------------------------------------------- // consensus states -func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { - config := cfg.ResetTestRoot("consensus_state_test") - return newStateWithConfig(config, state, pv, app) +func newState( + ctx context.Context, + logger log.Logger, + state sm.State, + pv types.PrivValidator, + app abci.Application, +) (*State, error) { + cfg, err := config.ResetTestRoot("consensus_state_test") + if err != nil { + return nil, err + } + + return newStateWithConfig(ctx, logger, cfg, state, pv, app), nil } func newStateWithConfig( - thisConfig *cfg.Config, + ctx context.Context, + logger log.Logger, + thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, ) *State { - blockStore := store.NewBlockStore(dbm.NewMemDB()) - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore) + return newStateWithConfigAndBlockStore(ctx, logger, thisConfig, state, pv, app, store.NewBlockStore(dbm.NewMemDB())) } func newStateWithConfigAndBlockStore( - thisConfig *cfg.Config, + ctx context.Context, + logger log.Logger, + thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockStore *store.BlockStore, ) *State { // one for mempool, one for consensus - mtx := new(tmsync.RWMutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + mtx := new(tmsync.Mutex) + proxyAppConnMem := abciclient.NewLocalClient(mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(mtx, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + + mempool := mempool.NewTxMempool( + logger.With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) + if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -434,14 +463,12 @@ func newStateWithConfigAndBlockStore( panic(err) } - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, 
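Editor's note: subscribeToVoter now filters inside an Observe callback instead of spawning a forwarding goroutine over an unbuffered subscription. A reduced sketch of that shape, assuming an Observe-style bus like the one this patch adopts (the bus type below is a fake, and the real eventbus.Observe also takes the query to match against):

package sketch

import (
	"bytes"
	"context"
)

type message struct {
	Addr []byte
	Data interface{}
}

// bus is a stand-in for an event bus exposing callback observation.
type bus struct{ observers []func(message) error }

func (b *bus) Observe(ctx context.Context, fn func(message) error) error {
	b.observers = append(b.observers, fn)
	return nil
}

// subscribeToAddr returns a channel fed only with messages for addr,
// filtering inline in the observer rather than in a relay goroutine.
// The buffer of 1 mirrors the rewritten test helper.
func subscribeToAddr(ctx context.Context, b *bus, addr []byte) (<-chan message, error) {
	ch := make(chan message, 1)
	err := b.Observe(ctx, func(msg message) error {
		if bytes.Equal(addr, msg.Addr) {
			ch <- msg
		}
		return nil
	})
	return ch, err
}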
evpool, blockStore) + cs := NewState(logger.With("module", "consensus"), thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err := eventBus.Start() + eventBus := eventbus.NewDefault(logger.With("module", "events")) + err := eventBus.Start(ctx) if err != nil { panic(err) } @@ -449,11 +476,11 @@ func newStateWithConfigAndBlockStore( return cs } -func loadPrivValidator(t *testing.T, config *cfg.Config) *privval.FilePV { +func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV { t.Helper() - privValidatorKeyFile := config.PrivValidator.KeyFile() + privValidatorKeyFile := cfg.PrivValidator.KeyFile() ensureDir(t, filepath.Dir(privValidatorKeyFile), 0700) - privValidatorStateFile := config.PrivValidator.StateFile() + privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { t.Fatalf("error generating validator file: %s", err) @@ -462,15 +489,18 @@ func loadPrivValidator(t *testing.T, config *cfg.Config) *privval.FilePV { return privValidator } -func makeState(config *cfg.Config, nValidators int) (*State, []*validatorStub) { +func makeState(ctx context.Context, cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub, error) { // Get State - state, privVals := makeGenesisState(config, genesisStateArgs{ + state, privVals := makeGenesisState(cfg, genesisStateArgs{ Validators: nValidators, }) - vss := make([]*validatorStub, nValidators) + cs, err := newState(ctx, logger, state, privVals[0], kvstore.NewApplication()) + if err != nil { + return nil, nil, err + } - cs := newState(state, privVals[0], kvstore.NewApplication()) + vss := make([]*validatorStub, nValidators) for i := 0; i < nValidators; i++ { vss[i] = newValidatorStub(privVals[i], int32(i)) @@ -478,7 +508,7 @@ func makeState(config *cfg.Config, nValidators int) (*State, []*validatorStub) { // since cs1 starts at 1 incrementHeight(vss[1:]...) 
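Editor's note: event buses are now built with eventbus.NewDefault(logger) and started via Start(ctx), scoping their lifetime to the context rather than a matching Stop call. A minimal stand-in for that context-driven lifecycle, using only the standard library:

package sketch

import "context"

type service struct {
	done chan struct{}
}

// Start runs the service until ctx is canceled; callers no longer
// have to pair every Start with an explicit Stop.
func (s *service) Start(ctx context.Context) error {
	s.done = make(chan struct{})
	go func() {
		defer close(s.done)
		<-ctx.Done() // shutdown is driven entirely by the context
	}()
	return nil
}

// Wait blocks until the service has fully shut down.
func (s *service) Wait() { <-s.done }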
- return cs, vss + return cs, vss, nil } //------------------------------------------------------------------------------- @@ -729,18 +759,19 @@ func consensusLogger() log.Logger { } func makeConsensusState( + ctx context.Context, t *testing.T, - config *cfg.Config, + cfg *config.Config, nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, - configOpts ...func(*cfg.Config), + configOpts ...func(*config.Config), ) ([]*State, cleanupFunc) { t.Helper() valSet, privVals := factory.ValidatorSet(nValidators, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) css := make([]*State, nValidators) logger := consensusLogger() @@ -752,7 +783,9 @@ func makeConsensusState( blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) + configRootDirs = append(configRootDirs, thisConfig.RootDir) for _, opt := range configOpts { @@ -770,9 +803,9 @@ func makeConsensusState( vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore) + l := logger.With("validator", i, "module", "consensus") + css[i] = newStateWithConfigAndBlockStore(ctx, l, thisConfig, state, privVals[i], app, blockStore) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, func() { @@ -787,25 +820,30 @@ func makeConsensusState( // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers( + ctx context.Context, t *testing.T, - config *cfg.Config, + cfg *config.Config, nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application, -) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) { - valSet, privVals := factory.ValidatorSet(nValidators, testMinPower) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) - css := make([]*State, nPeers) +) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) { t.Helper() + valSet, privVals := factory.ValidatorSet(nValidators, testMinPower) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) + css := make([]*State, nPeers) logger := consensusLogger() - var peer0Config *cfg.Config + var peer0Config *config.Config configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { state, _ := sm.MakeGenesisState(genDoc) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + if err != nil { + t.Fatalf("error reseting config %s", err) + } + configRootDirs = append(configRootDirs, thisConfig.RootDir) ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal if i == 0 { @@ -815,11 +853,11 @@ func randConsensusNetWithPeers( if i < nValidators { privVal = privVals[i] } else { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") if err != nil { t.Fatalf("error creating temp file for validator key: %s", err) } - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", 
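Editor's note: makeConsensusState and its siblings now check the error from ResetConfig with require.NoError (or t.Fatalf) at the call site instead of letting shared helpers panic, so a failure reports the test's own file and line. The pattern in isolation (the helper is illustrative):

package sketch

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// resetRoot is an error-returning setup helper; it never panics.
func resetRoot(name string) (string, error) {
	return os.MkdirTemp("", name)
}

func TestUsesRoot(t *testing.T) {
	root, err := resetRoot("consensus_test")
	require.NoError(t, err) // failure points here, not inside the helper
	t.Cleanup(func() { _ = os.RemoveAll(root) })
}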
"priv_validator_state_") if err != nil { t.Fatalf("error loading validator state: %s", err) } @@ -830,7 +868,7 @@ func randConsensusNetWithPeers( } } - app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) + app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i))) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok { // simulate handshake, receive app version. If don't do this, replay test will fail @@ -839,9 +877,8 @@ func randConsensusNetWithPeers( app.InitChain(abci.RequestInitChain{Validators: vals}) // sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above - css[i] = newStateWithConfig(thisConfig, state, privVal, app) + css[i] = newStateWithConfig(ctx, logger.With("validator", i, "module", "consensus"), thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, genDoc, peer0Config, func() { for _, dir := range configRootDirs { @@ -857,7 +894,7 @@ type genesisStateArgs struct { Time time.Time } -func makeGenesisState(config *cfg.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) { +func makeGenesisState(cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) { if args.Power == 0 { args.Power = 1 } @@ -871,7 +908,7 @@ func makeGenesisState(config *cfg.Config, args genesisStateArgs) (sm.State, []ty if args.Time.IsZero() { args.Time = time.Now() } - genDoc := factory.GenesisDoc(config, args.Time, valSet.Validators, args.Params) + genDoc := factory.GenesisDoc(cfg, args.Time, valSet.Validators, args.Params) s0, _ := sm.MakeGenesisState(genDoc) return s0, privValidators } @@ -895,7 +932,7 @@ type mockTicker struct { fired bool } -func (m *mockTicker) Start() error { +func (m *mockTicker) Start(context.Context) error { return nil } @@ -922,7 +959,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo { func (*mockTicker) SetLogger(log.Logger) {} func newPersistentKVStore() abci.Application { - dir, err := ioutil.TempDir("", "persistent-kvstore") + dir, err := os.MkdirTemp("", "persistent-kvstore") if err != nil { panic(err) } diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index ad1d6e50d..c404844a6 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -5,7 +5,9 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -15,25 +17,27 @@ import ( ) func TestReactorInvalidPrecommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) n := 4 - states, cleanup := makeConsensusState(t, + states, cleanup := makeConsensusState(ctx, t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) for i := 0; i < 4; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) + ticker := NewTimeoutTicker(states[i].Logger) states[i].SetTimeoutTicker(ticker) } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - 
reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // this val sends a random precommit at each height @@ -47,7 +51,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { byzState.mtx.Lock() privVal := byzState.privValidator byzState.doPrevote = func(height int64, round int32) { - invalidDoPrevoteFunc(t, height, round, byzState, byzReactor, privVal) + invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal) } byzState.mtx.Unlock() @@ -55,13 +59,17 @@ func TestReactorInvalidPrecommit(t *testing.T) { // // TODO: Make this tighter by ensuring the halt happens by block 2. var wg sync.WaitGroup + for i := 0; i < 10; i++ { for _, sub := range rts.subs { wg.Add(1) - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() // cancel other subscribers on failure + } }(sub) } } @@ -69,7 +77,15 @@ func TestReactorInvalidPrecommit(t *testing.T) { wg.Wait() } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) { +func invalidDoPrevoteFunc( + ctx context.Context, + t *testing.T, + height int64, + round int32, + cs *State, + r *Reactor, + pv types.PrivValidator, +) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -78,7 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r cs.mtx.Lock() cs.privValidator = pv - pubKey, err := cs.privValidator.GetPubKey(context.Background()) + pubKey, err := cs.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() @@ -99,7 +115,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r } p := precommit.ToProto() - err = cs.privValidator.SignVote(context.Background(), cs.state.ChainID, p) + err = cs.privValidator.SignVote(ctx, cs.state.ChainID, p) require.NoError(t, err) precommit.Signature = p.Signature diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 7302ad9b6..fbff36d77 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -10,41 +10,45 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - mempl "github.com/tendermint/tendermint/internal/mempool" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/internal/mempool" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) // for testing -func assertMempool(txn txNotifier) mempl.Mempool { - return txn.(mempl.Mempool) +func assertMempool(txn txNotifier) mempool.Mempool { + return txn.(mempool.Mempool) } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseConfig := configSetup(t) - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocks = false state, privVals := makeGenesisState(baseConfig, genesisStateArgs{ Validators: 1, Power: 10}) - 
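Editor's note: the subscriber goroutines above use assert.NoError plus cancel() rather than require. require's FailNow stops execution via runtime.Goexit and must run on the test goroutine, while assert merely records the failure and returns false, letting a worker goroutine cancel its siblings and exit cleanly. The pattern, reduced:

package sketch

import (
	"context"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
)

// waitAll runs one goroutine per subscriber: assert (not require)
// inside the goroutine, and cancel() so the remaining subscribers
// unblock instead of hanging the WaitGroup on failure.
func waitAll(ctx context.Context, t *testing.T, subs []func(context.Context) error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, next := range subs {
		wg.Add(1)
		go func(next func(context.Context) error) {
			defer wg.Done()
			if err := next(ctx); !assert.NoError(t, err) {
				cancel() // release the other subscribers on failure
			}
		}(next)
	}
	wg.Wait()
}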
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, height, round) ensureNewEventOnChannel(t, newBlockCh) // first block gets committed ensureNoNewEventOnChannel(t, newBlockCh) - deliverTxsRange(cs, 0, 1) + deliverTxsRange(ctx, cs, 0, 1) ensureNewEventOnChannel(t, newBlockCh) // commit txs ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash ensureNoNewEventOnChannel(t, newBlockCh) @@ -52,20 +56,23 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { baseConfig := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := makeGenesisState(baseConfig, genesisStateArgs{ Validators: 1, Power: 10}) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, cs.Height, cs.Round) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, cs.Height, cs.Round) ensureNewEventOnChannel(t, newBlockCh) // first block gets committed ensureNoNewEventOnChannel(t, newBlockCh) // then we dont make a block ... 
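Editor's note: these mempool tests assert progress purely through subscription channels and the ensure helpers. The helper bodies are not part of this hunk, so the following is an assumption about their shape rather than the package's actual code:

package sketch

import (
	"testing"
	"time"
)

// ensureNewEvent fails the test if nothing arrives in time.
func ensureNewEvent(t *testing.T, ch <-chan struct{}, within time.Duration) {
	t.Helper()
	select {
	case <-ch:
	case <-time.After(within):
		t.Fatal("expected an event, got none")
	}
}

// ensureNoNewEvent fails the test if anything arrives too soon,
// e.g. a block produced while CreateEmptyBlocks is false.
func ensureNoNewEvent(t *testing.T, ch <-chan struct{}, within time.Duration) {
	t.Helper()
	select {
	case <-ch:
		t.Fatal("unexpected event")
	case <-time.After(within):
	}
}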
@@ -74,20 +81,23 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { func TestMempoolProgressInHigherRound(t *testing.T) { baseConfig := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocks = false state, privVals := makeGenesisState(baseConfig, genesisStateArgs{ Validators: 1, Power: 10}) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) cs.setProposal = func(proposal *types.Proposal) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and @@ -97,7 +107,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { } return cs.defaultSetProposal(proposal) } - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) ensureNewRound(t, newRoundCh, height, round) // first round at first height ensureNewEventOnChannel(t, newBlockCh) // first block gets committed @@ -106,7 +116,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { round = 0 ensureNewRound(t, newRoundCh, height, round) // first round at next height - deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + deliverTxsRange(ctx, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) round++ // moving to the next round @@ -114,12 +124,12 @@ func TestMempoolProgressInHigherRound(t *testing.T) { ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block } -func deliverTxsRange(cs *State, start, end int) { +func deliverTxsRange(ctx context.Context, cs *State, start, end int) { // Deliver some txs. 
for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempl.TxInfo{}) + err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) if err != nil { panic(fmt.Sprintf("Error after CheckTx: %v", err)) } @@ -127,22 +137,29 @@ func deliverTxsRange(cs *State, start, end int) { } func TestMempoolTxConcurrentWithCommit(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + logger := log.TestingLogger() state, privVals := makeGenesisState(config, genesisStateArgs{ Validators: 1, Power: 10}) stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) + + cs := newStateWithConfigAndBlockStore( + ctx, + logger, config, state, privVals[0], NewCounterApplication(), blockStore) + err := stateStore.Save(state) require.NoError(t, err) - newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 - go deliverTxsRange(cs, 0, int(numTxs)) + go deliverTxsRange(ctx, cs, 0, int(numTxs)) - startTestRound(cs, cs.Height, cs.Round) + startTestRound(ctx, cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { select { case msg := <-newBlockHeaderCh: @@ -156,6 +173,8 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { func TestMempoolRmBadTx(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() state, privVals := makeGenesisState(config, genesisStateArgs{ Validators: 1, @@ -163,7 +182,7 @@ func TestMempoolRmBadTx(t *testing.T) { app := NewCounterApplication() stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore) + cs := newStateWithConfigAndBlockStore(ctx, log.TestingLogger(), config, state, privVals[0], app, blockStore) err := stateStore.Save(state) require.NoError(t, err) @@ -183,13 +202,13 @@ func TestMempoolRmBadTx(t *testing.T) { // Try to send the tx through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, func(r *abci.Response) { + err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.Response) { if r.GetCheckTx().Code != code.CodeTypeBadNonce { t.Errorf("expected checktx to return bad nonce, got %v", r) return } checkTxRespCh <- struct{}{} - }, mempl.TxInfo{}) + }, mempool.TxInfo{}) if err != nil { t.Errorf("error after CheckTx: %v", err) return diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index bceac4942..a75f1505c 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -61,6 +61,9 @@ type Metrics struct { // Number of blockparts transmitted by peer. BlockParts metrics.Counter + + // Histogram of time taken per step annotated with reason that the step proceeded. + StepTime metrics.Histogram } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -187,6 +190,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_parts", Help: "Number of blockparts transmitted by peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + StepTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "step_time", + Help: "Time spent per step.", + }, append(labels, "step", "reason")).With(labelsAndValues...), } } diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index 263969798..5ac592f0d 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" ) // ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go index b7f521ff2..9da851065 100644 --- a/internal/consensus/mocks/fast_sync_reactor.go +++ b/internal/consensus/mocks/fast_sync_reactor.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" time "time" ) diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 17aef9aa2..052b8f556 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -77,7 +77,7 @@ func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error { m.LastCommitRound, initialHeight) } if m.Height > initialHeight && m.LastCommitRound < 0 { - return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint + return fmt.Errorf("LastCommitRound can only be negative for initial height %v", initialHeight) } return nil diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 73e61f21c..6a64e8e10 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -193,7 +193,10 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, boo } if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - return votes.GetByIndex(int32(index)), true + vote := votes.GetByIndex(int32(index)) + if vote != nil { + return vote, true + } } return nil, false @@ -358,6 +361,9 @@ func (ps *PeerState) BlockPartsSent() int { // SetHasVote sets the given vote as known by the peer func (ps *PeerState) SetHasVote(vote *types.Vote) { + if vote == nil { + return + } ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 915756488..03865d13d 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1,84 +1,71 @@ package consensus import ( + "context" "fmt" "runtime/debug" "time" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" 
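Editor's note: the new StepTime histogram is labeled by step and reason. Only the struct field and the registration above come from this patch; the recording site below is a hypothetical illustration of how such a go-kit metric is typically observed:

package sketch

import (
	"time"

	"github.com/go-kit/kit/metrics"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

type Metrics struct {
	// Histogram of time taken per step, annotated with the reason
	// the step proceeded (mirrors the field added in this diff).
	StepTime metrics.Histogram
}

func NewMetrics(namespace string) *Metrics {
	return &Metrics{
		StepTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: "consensus",
			Name:      "step_time",
			Help:      "Time spent per step.",
		}, []string{"step", "reason"}),
	}
}

// observeStep is a hypothetical call site: record how long a step
// took and why it advanced.
func (m *Metrics) observeStep(step, reason string, start time.Time) {
	m.StepTime.With("step", step, "reason", reason).Observe(time.Since(start).Seconds())
}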
tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*tmcons.Message)(nil) +) - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - StateChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(StateChannel), - Priority: 8, - SendQueueCapacity: 64, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 12000, - }, +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptors() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + ID: StateChannel, + MessageType: new(tmcons.Message), + Priority: 8, + SendQueueCapacity: 64, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 128, }, - DataChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - // TODO: Consider a split between gossiping current block and catchup - // stuff. Once we gossip the whole block there is nothing left to send - // until next height or round. - ID: byte(DataChannel), - Priority: 12, - SendQueueCapacity: 64, - RecvBufferCapacity: 512, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 40000, - }, + { + // TODO: Consider a split between gossiping current block and catchup + // stuff. Once we gossip the whole block there is nothing left to send + // until next height or round. + ID: DataChannel, + MessageType: new(tmcons.Message), + Priority: 12, + SendQueueCapacity: 64, + RecvBufferCapacity: 512, + RecvMessageCapacity: maxMsgSize, }, - VoteChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteChannel), - Priority: 10, - SendQueueCapacity: 64, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 150, - }, + { + ID: VoteChannel, + MessageType: new(tmcons.Message), + Priority: 10, + SendQueueCapacity: 64, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, }, - VoteSetBitsChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteSetBitsChannel), - Priority: 5, - SendQueueCapacity: 8, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 50, - }, + { + ID: VoteSetBitsChannel, + MessageType: new(tmcons.Message), + Priority: 5, + SendQueueCapacity: 8, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, }, } -) +} const ( StateChannel = p2p.ChannelID(0x20) @@ -99,7 +86,7 @@ type ReactorOption func(*Reactor) // NOTE: Temporary interface for switching to block sync, we should get rid of v0. // See: https://github.com/tendermint/tendermint/issues/4595 type BlockSyncReactor interface { - SwitchToBlockSync(sm.State) error + SwitchToBlockSync(context.Context, sm.State) error GetMaxPeerBlockHeight() int64 @@ -125,7 +112,7 @@ type Reactor struct { service.BaseService state *State - eventBus *types.EventBus + eventBus *eventbus.EventBus Metrics *Metrics mtx tmsync.RWMutex @@ -188,7 +175,7 @@ func NewReactor( // envelopes on each. 
In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { r.Logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) // start routine that computes peer statistics for evaluating peer quality @@ -200,7 +187,7 @@ func (r *Reactor) OnStart() error { r.subscribeToBroadcastEvents() if !r.WaitSync() { - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { return err } } @@ -230,17 +217,15 @@ func (r *Reactor) OnStop() { } r.mtx.Lock() - peers := r.peers + // Close and wait for each of the peers to shutdown. + // This is safe to perform with the lock since none of the peers require the + // lock to complete any of the methods that the waitgroup is waiting on. + for _, state := range r.peers { + state.closer.Close() + state.broadcastWG.Wait() + } r.mtx.Unlock() - // wait for all spawned peer goroutines to gracefully exit - for _, ps := range peers { - ps.closer.Close() - } - for _, ps := range peers { - ps.broadcastWG.Wait() - } - // Close the StateChannel goroutine separately since it uses its own channel // to signal closure. close(r.stateCloseCh) @@ -260,7 +245,7 @@ func (r *Reactor) OnStop() { } // SetEventBus sets the reactor's event bus. -func (r *Reactor) SetEventBus(b *types.EventBus) { +func (r *Reactor) SetEventBus(b *eventbus.EventBus) { r.eventBus = b r.state.SetEventBus(b) } @@ -280,7 +265,7 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { // SwitchToConsensus switches from block-sync mode to consensus mode. It resets // the state, turns off block-sync, and starts the consensus state-machine. 
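Editor's note: OnStop now closes each peer and waits for its broadcast goroutines while still holding the reactor mutex, which the accompanying comment justifies: none of the waited-on shutdown paths take that lock. The shape, and its one safety condition, in isolation (types are stand-ins):

package sketch

import "sync"

type peer struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

type reactor struct {
	mtx   sync.Mutex
	peers map[string]*peer
}

// stopPeers closes and waits for each peer under the lock. This is
// deadlock-free only because the peer goroutines never acquire
// r.mtx on their shutdown path; if they did, Wait would block forever.
func (r *reactor) stopPeers() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	for _, p := range r.peers {
		close(p.quit)
		p.wg.Wait()
	}
}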
-func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { +func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { r.Logger.Info("switching to consensus") // we have no votes, so reconstruct LastCommit from SeenCommit @@ -303,7 +288,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.state.doWALCatchup = false } - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { panic(fmt.Sprintf(`failed to start consensus state: %v conS: diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 8f788d920..696c31816 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -10,28 +10,29 @@ import ( "time" "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + sm "github.com/tendermint/tendermint/internal/state" + statemocks "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" - sm "github.com/tendermint/tendermint/state" - statemocks "github.com/tendermint/tendermint/state/mocks" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) var ( @@ -42,37 +43,46 @@ type reactorTestSuite struct { network *p2ptest.Network states map[types.NodeID]*State reactors map[types.NodeID]*Reactor - subs map[types.NodeID]types.Subscription - blocksyncSubs map[types.NodeID]types.Subscription + subs map[types.NodeID]eventbus.Subscription + blocksyncSubs map[types.NodeID]eventbus.Subscription stateChannels map[types.NodeID]*p2p.Channel dataChannels map[types.NodeID]*p2p.Channel voteChannels map[types.NodeID]*p2p.Channel voteSetBitsChannels map[types.NodeID]*p2p.Channel } -func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { - return p2p.ChannelDescriptor{ - ID: byte(chID), +func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: chID, + MessageType: new(tmcons.Message), + RecvBufferCapacity: size, } } -func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSuite { +func setup( + ctx context.Context, + t *testing.T, + numNodes int, + states []*State, + size int, +) *reactorTestSuite { t.Helper() rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, 
p2ptest.NetworkOptions{NumNodes: numNodes}), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + subs: make(map[types.NodeID]eventbus.Subscription, numNodes), + blocksyncSubs: make(map[types.NodeID]eventbus.Subscription, numNodes), } - rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size) - rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel), new(tmcons.Message), size) - rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size) - rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size) + rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel, size)) + rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel, size)) + rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size)) + rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size)) - _, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) + // Canceled during cleanup (see below). i := 0 for nodeID, node := range rts.network.Nodes { @@ -85,16 +95,24 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.dataChannels[nodeID], rts.voteChannels[nodeID], rts.voteSetBitsChannels[nodeID], - node.MakePeerUpdates(t), + node.MakePeerUpdates(ctx, t), true, ) reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size) + blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + Limit: size, + }) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size) + fsSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryBlockSyncStatus, + Limit: size, + }) require.NoError(t, err) rts.states[nodeID] = state @@ -107,7 +125,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu require.NoError(t, state.blockExec.Store().Save(state.state)) } - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) i++ @@ -119,14 +137,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.network.Start(t) t.Cleanup(func() { - for nodeID, r := range rts.reactors { - require.NoError(t, rts.states[nodeID].eventBus.Stop()) - require.NoError(t, r.Stop()) - require.False(t, r.IsRunning()) - } - - leaktest.Check(t) cancel() + leaktest.Check(t) }) return rts @@ -150,32 +162,39 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { } func waitForAndValidateBlock( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + 
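Editor's note: subscriptions move to SubscribeWithArgs with explicit ClientID, Query, and buffer Limit fields, replacing the positional Subscribe arguments. A stand-in of the call shape (the bus and subscription types below are fakes, and the real Query field takes a pubsub query value, not a string):

package sketch

import "context"

type SubscribeArgs struct {
	ClientID string
	Query    string // the real API takes a pubsub query, not a string
	Limit    int
}

type Subscription struct{}

type Bus struct{}

func (b *Bus) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) {
	// stand-in: validate args and register the subscriber
	return &Subscription{}, nil
}

func example(ctx context.Context, b *Bus) (*Subscription, error) {
	return b.SubscribeWithArgs(ctx, SubscribeArgs{
		ClientID: "test-client",
		Query:    "tm.event = 'NewBlock'",
		Limit:    100, // buffer must be large enough to avoid deadlock
	})
}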
cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) for _, tx := range txs { - require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) + require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{})) } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -183,21 +202,28 @@ func waitForAndValidateBlock( } func waitForAndValidateBlockWithTx( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { ntxs := 0 - BLOCK_TX_LOOP: for { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) // check that txs match the txs we're waiting for. @@ -209,18 +235,17 @@ func waitForAndValidateBlockWithTx( } if ntxs == len(txs) { - break BLOCK_TX_LOOP + break } } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -228,22 +253,30 @@ func waitForAndValidateBlockWithTx( } func waitForBlockWithUpdatedValsAndValidateIt( + bctx context.Context, t *testing.T, n int, updatedVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, css []*State, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { var newBlock *types.Block - LOOP: for { - msg := <-blocksSubs[j].Out() + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock = msg.Data().(types.EventDataNewBlock).Block if newBlock.LastCommit.Size() == len(updatedVals) { - break LOOP + break } } @@ -251,12 +284,11 @@ func waitForBlockWithUpdatedValsAndValidateIt( } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -273,19 +305,22 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he } func TestReactorBasic(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) n := 4 - states, cleanup := makeConsensusState(t, - config, n, "consensus_reactor_test", + states, cleanup := makeConsensusState(ctx, t, + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -293,9 +328,12 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - <-s.Out() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -305,9 +343,13 @@ func 
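Editor's note: the wait helpers also tighten the WaitGroup usage: wg.Add(1) sits next to each go statement and Done is deferred, so the count always matches the goroutines actually launched and Done still runs when fn returns early on a canceled context. The idiom in isolation:

package sketch

import "sync"

// runAll pairs each Add with exactly one goroutine and defers Done,
// instead of a single up-front wg.Add(n) and a trailing wg.Done().
func runAll(fns []func()) {
	var wg sync.WaitGroup
	for _, fn := range fns {
		wg.Add(1)
		go func(fn func()) {
			defer wg.Done() // runs even if fn returns early
			fn()
		}(fn)
	}
	wg.Wait()
}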
TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the consensus switch - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - msg := <-s.Out() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } ensureBlockSyncStatus(t, msg, true, 0) }(sub) } @@ -316,7 +358,10 @@ func TestReactorBasic(t *testing.T) { } func TestReactorWithEvidence(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) n := 4 testName := "consensus_reactor_test" @@ -324,7 +369,7 @@ func TestReactorWithEvidence(t *testing.T) { appFunc := newKVStore valSet, privVals := factory.ValidatorSet(n, 30) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) states := make([]*State, n) logger := consensusLogger() @@ -333,7 +378,8 @@ func TestReactorWithEvidence(t *testing.T) { stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) @@ -347,12 +393,17 @@ func TestReactorWithEvidence(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.RWMutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + mtx := new(tmsync.Mutex) + proxyAppConnMem := abciclient.NewLocalClient(mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -361,7 +412,7 @@ func TestReactorWithEvidence(t *testing.T) { // everyone includes evidence of another double signing vIdx := (i + 1) % n - ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID()) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID()) evpool := &statemocks.EvidencePool{} evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ @@ -371,27 +422,24 @@ func TestReactorWithEvidence(t *testing.T) { evpool2 := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) + cs := NewState(logger.With("validator", i, "module", "consensus"), + thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() - require.NoError(t, err) + eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger.With("validator", i, 
"module", "consensus")) states[i] = cs } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -400,12 +448,16 @@ func TestReactorWithEvidence(t *testing.T) { // We expect for each validator that is the proposer to propose one piece of // evidence. - go func(s types.Subscription) { - msg := <-s.Out() - block := msg.Data().(types.EventDataNewBlock).Block + go func(s eventbus.Subscription) { + defer wg.Done() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + block := msg.Data().(types.EventDataNewBlock).Block require.Len(t, block.Evidence.Evidence, 1) - wg.Done() }(sub) } @@ -413,35 +465,39 @@ func TestReactorWithEvidence(t *testing.T) { } func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) n := 4 states, cleanup := makeConsensusState( + ctx, t, - config, + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, - func(c *cfg.Config) { + func(c *config.Config) { c.Consensus.CreateEmptyBlocks = false }, ) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // send a tx require.NoError( t, assertMempool(states[3].txNotifier).CheckTx( - context.Background(), + ctx, []byte{1, 2, 3}, nil, mempool.TxInfo{}, @@ -453,9 +509,12 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -463,19 +522,22 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } func TestReactorRecordsVotesAndBlockParts(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) n := 4 - states, cleanup := makeConsensusState(t, - config, n, "consensus_reactor_test", + states, cleanup := makeConsensusState(ctx, t, + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -483,9 +545,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -522,12 +587,16 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorVotingPowerChange(t 
*testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) n := 4 states, cleanup := makeConsensusState( + ctx, t, - config, + cfg, n, "consensus_voting_power_changes_test", newMockTickerFunc(true), @@ -536,17 +605,17 @@ func TestReactorVotingPowerChange(t *testing.T) { t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < n; i++ { - pubKey, err := states[i].privValidator.GetPubKey(context.Background()) + pubKey, err := states[i].privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() @@ -558,32 +627,35 @@ func TestReactorVotingPowerChange(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } wg.Wait() - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } - val1PubKey, err := states[0].privValidator.GetPubKey(context.Background()) + val1PubKey, err := states[0].privValidator.GetPubKey(ctx) require.NoError(t, err) - val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey) + val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey) require.NoError(t, err) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) previousTotalVotingPower := states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -595,10 +667,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, 
states[0].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -609,10 +681,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -623,13 +695,17 @@ func TestReactorVotingPowerChange(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := configSetup(t) nPeers := 7 nVals := 4 states, _, _, cleanup := randConsensusNetWithPeers( + ctx, t, - config, + cfg, nVals, nPeers, "consensus_val_set_changes_test", @@ -638,11 +714,11 @@ func TestReactorValidatorSetChanges(t *testing.T) { ) t.Cleanup(cleanup) - rts := setup(t, nPeers, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nPeers, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators @@ -659,23 +735,26 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } wg.Wait() - newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } @@ -683,36 +762,36 @@ func TestReactorValidatorSetChanges(t *testing.T) { // wait till everyone makes block 2 // ensure the commit includes all validators // send newValTx to change vals in block 3 - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 3. // it includes the commit for block 2, which is by the original validator set - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 4. 
// it includes the commit for block 3, which is by the original validator set - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) // the commits for block 4 should be with the updated validator set activeVals[string(newValidatorPubKey1.Address())] = struct{}{} // wait till everyone makes block 5 // it includes the commit for block 4, which should have the updated validator set - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) - updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) previousTotalVotingPower := states[nVals].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) require.NotEqualf( t, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -720,40 +799,40 @@ func TestReactorValidatorSetChanges(t *testing.T) { previousTotalVotingPower, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), ) - newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background()) + newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, 
activeVals, blocksSubs, states) activeVals[string(newValidatorPubKey2.Address())] = struct{}{} activeVals[string(newValidatorPubKey3.Address())] = struct{}{} - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) delete(activeVals, string(newValidatorPubKey2.Address())) delete(activeVals, string(newValidatorPubKey3.Address())) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 9b22f4631..f40389f2b 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "hash/crc32" "io" @@ -11,9 +12,10 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -36,7 +38,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // Unmarshal and apply a single message to the consensus state as if it were // received in receiveRoutine. Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running. -func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { +func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub eventbus.Subscription) error { // Skip meta messages which exist for demarcating boundaries. if _, ok := msg.Msg.(EndHeightMessage); ok { return nil @@ -47,18 +49,18 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr case types.EventDataRoundState: cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) // these are playback checks - ticker := time.After(time.Second * 2) if newStepSub != nil { - select { - case stepMsg := <-newStepSub.Out(): - m2 := stepMsg.Data().(types.EventDataRoundState) - if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { - return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) - } - case <-newStepSub.Canceled(): - return fmt.Errorf("failed to read off newStepSub.Out(). 
newStepSub was canceled") - case <-ticker: - return fmt.Errorf("failed to read off newStepSub.Out()") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + stepMsg, err := newStepSub.Next(ctx) + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("subscription timed out: %w", err) + } else if err != nil { + return fmt.Errorf("subscription canceled: %w", err) + } + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } } case msgInfo: @@ -209,40 +211,36 @@ type Handshaker struct { nBlocks int // number of blocks applied to the state } -func NewHandshaker(stateStore sm.Store, state sm.State, - store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { +func NewHandshaker( + logger log.Logger, + stateStore sm.Store, + state sm.State, + store sm.BlockStore, + eventBus types.BlockEventPublisher, + genDoc *types.GenesisDoc, +) *Handshaker { return &Handshaker{ stateStore: stateStore, initialState: state, store: store, - eventBus: types.NopEventBus{}, + eventBus: eventBus, genDoc: genDoc, - logger: log.NewNopLogger(), + logger: logger, nBlocks: 0, } } -func (h *Handshaker) SetLogger(l log.Logger) { - h.logger = l -} - -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { - h.eventBus = eventBus -} - // NBlocks returns the number of blocks applied to the state. func (h *Handshaker) NBlocks() int { return h.nBlocks } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. - res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) + res, err := proxyApp.Query().InfoSync(ctx, proxy.RequestInfo) if err != nil { return fmt.Errorf("error calling Info: %v", err) } @@ -266,7 +264,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } @@ -283,6 +281,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // matches the current state. // Returns the final AppHash or an error. 
func (h *Handshaker) ReplayBlocks( + ctx context.Context, state sm.State, appHash []byte, appBlockHeight int64, @@ -317,7 +316,7 @@ func (h *Handshaker) ReplayBlocks( Validators: nextVals, AppStateBytes: h.genDoc.AppState, } - res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) + res, err := proxyApp.Consensus().InitChainSync(ctx, req) if err != nil { return nil, err } @@ -423,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } - mockApp := newMockProxyApp(appHash, abciResponses) + mockApp := newMockProxyApp(ctx, h.logger, appHash, abciResponses) h.logger.Info("Replay last block using mock app") state, err = h.replayBlock(state, storeBlockHeight, mockApp) return state.AppHash, err diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 51cb090d7..1de0ffa0e 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -12,13 +12,13 @@ import ( dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -31,16 +31,27 @@ const ( // replay messages interactively or all at once // replay the wal file -func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(config, csConfig) - - if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) +func RunReplayFile( + ctx context.Context, + logger log.Logger, + cfg config.BaseConfig, + csConfig *config.ConsensusConfig, + console bool, +) error { + consensusState, err := newConsensusStateForReplay(ctx, cfg, logger, csConfig) + if err != nil { + return err } + + if err := consensusState.ReplayFile(ctx, csConfig.WalFile(), console); err != nil { + return fmt.Errorf("consensus replay: %w", err) + } + + return nil } // Replay msgs in file or start the console -func (cs *State) ReplayFile(file string, console bool) error { +func (cs *State) ReplayFile(ctx context.Context, file string, console bool) error { if cs.IsRunning() { return errors.New("cs is already running, cannot replay") @@ -53,8 +64,10 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected - ctx := context.Background() - newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } @@ -78,7 +91,10 @@ func (cs *State) ReplayFile(file string, console bool) error { var msg *TimedWALMessage for { if nextN == 0 && console { - nextN = pb.replayConsoleLoop() + nextN, err = pb.replayConsoleLoop() + if err != nil { + return err + } } msg, err = pb.dec.Decode() @@ -125,13 +141,13 @@ func 
newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl } // go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { +func (pb *playback) replayReset(count int, newStepSub eventbus.Subscription) error { if err := pb.cs.Stop(); err != nil { return err } pb.cs.Wait() - newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + newCS := NewState(pb.cs.Logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool) newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() @@ -180,16 +196,17 @@ func (cs *State) startForReplay() { }()*/ } -// console function for parsing input and running commands -func (pb *playback) replayConsoleLoop() int { +// console function for parsing input and running commands. The integer +// return value is invalid unless the error is nil. +func (pb *playback) replayConsoleLoop() (int, error) { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) line, more, err := bufReader.ReadLine() if more { - tmos.Exit("input is too long") + return 0, fmt.Errorf("input is too long") } else if err != nil { - tmos.Exit(err.Error()) + return 0, err } tokens := strings.Split(string(line), " ") @@ -203,13 +220,13 @@ func (pb *playback) replayConsoleLoop() int { // "next N" -> replay next N messages if len(tokens) == 1 { - return 0 + return 0, nil } i, err := strconv.Atoi(tokens[1]) if err != nil { fmt.Println("next takes an integer argument") } else { - return i + return i, nil } case "back": @@ -219,12 +236,15 @@ func (pb *playback) replayConsoleLoop() int { // NOTE: "back" is not supported in the state machine design, // so we restart and replay up to - ctx := context.Background() + ctx := context.TODO() // ensure all new step events are regenerated as expected - newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { - tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + return 0, fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} @@ -286,56 +306,62 @@ func (pb *playback) replayConsoleLoop() int { //-------------------------------------------------------------------------------- // convenience for replay mode -func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { - dbType := dbm.BackendType(config.DBBackend) +func newConsensusStateForReplay( + ctx context.Context, + cfg config.BaseConfig, + logger log.Logger, + csConfig *config.ConsensusConfig, +) (*State, error) { + dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore - blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get State - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + stateStore := sm.NewStore(stateDB) - gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) + 
gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + state, err := sm.MakeGenesisState(gdoc) if err != nil { - tmos.Exit(err.Error()) + return nil, err } // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator) - err = proxyApp.Start() + clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) if err != nil { - tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + return nil, fmt.Errorf("starting proxy app conns: %w", err) } - eventBus := types.NewEventBus() - if err := eventBus.Start(); err != nil { - tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + eventBus := eventbus.NewDefault(logger) + if err := eventBus.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start event bus: %w", err) } - handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) - handshaker.SetEventBus(eventBus) - err = handshaker.Handshake(proxyApp) - if err != nil { - tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + handshaker := NewHandshaker(logger, stateStore, state, blockStore, eventBus, gdoc) + + if err = handshaker.Handshake(ctx, proxyApp); err != nil { + return nil, err } mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(csConfig, state.Copy(), blockExec, + consensusState := NewState(logger, csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) - return consensusState + return consensusState, nil } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index c79340a0c..8672f8e1e 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -3,11 +3,13 @@ package consensus import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -15,22 +17,23 @@ import ( type emptyMempool struct{} -var _ mempl.Mempool = emptyMempool{} +var _ mempool.Mempool = emptyMempool{} func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } -func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } +func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( _ int64, _ types.Txs, _ 
[]*abci.ResponseDeliverTx, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, ) error { return nil } @@ -52,17 +55,22 @@ func (emptyMempool) CloseWAL() {} // Useful because we don't want to call Commit() twice for the same block on // the real app. -func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { - clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ +func newMockProxyApp( + ctx context.Context, + logger log.Logger, + appHash []byte, + abciResponses *tmstate.ABCIResponses, +) proxy.AppConnConsensus { + clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ appHash: appHash, abciResponses: abciResponses, }) - cli, _ := clientCreator.NewABCIClient() - err := cli.Start() + cli, _ := clientCreator(logger) + err := cli.Start(ctx) if err != nil { panic(err) } - return proxy.NewAppConnConsensus(cli) + return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) } type mockProxyApp struct { diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 7d65d99ca..61b1ce4cd 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -3,9 +3,9 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -19,22 +19,25 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -53,7 +56,7 @@ import ( // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. 
-func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, +func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) @@ -61,18 +64,19 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi privValidator := loadPrivValidator(t, consensusReplayConfig) blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + ctx, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) + bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) defer func() { if err := cs.Stop(); err != nil { @@ -84,14 +88,18 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. - newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + newBlockSub, err := cs.eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-newBlockSub.Out(): - case <-newBlockSub.Canceled(): - t.Fatal("newBlockSub was canceled") - case <-time.After(120 * time.Second): + ctx, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + _, err = newBlockSub.Next(ctx) + if errors.Is(err, context.DeadlineExceeded) { t.Fatal("Timed out waiting for new block (see trace above)") + } else if err != nil { + t.Fatal("newBlockSub was canceled") } } @@ -102,7 +110,7 @@ func sendTxs(ctx context.Context, cs *State) { return default: tx := []byte{byte(i)} - if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil { + if err := assertMempool(cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}); err != nil { panic(err) } i++ @@ -112,6 +120,9 @@ func sendTxs(ctx context.Context, cs *State) { // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
func TestWALCrash(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testCases := []struct { name string initFn func(dbm.DB, *State, context.Context) @@ -130,13 +141,14 @@ func TestWALCrash(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - consensusReplayConfig := ResetConfig(tc.name) - crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) + consensusReplayConfig, err := ResetConfig(tc.name) + require.NoError(t, err) + crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) } } -func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, +func crashWALandCheckLiveness(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -156,16 +168,17 @@ LOOP: require.NoError(t, err) privValidator := loadPrivValidator(t, consensusReplayConfig) cs := newStateWithConfigAndBlockStore( + ctx, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) // start sending transactions - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) initFn(stateDB, cs, ctx) // clean up WAL file from the previous iteration @@ -173,7 +186,7 @@ LOOP: os.Remove(walFile) // set crashing WAL - csWal, err := cs.OpenWAL(walFile) + csWal, err := cs.OpenWAL(ctx, walFile) require.NoError(t, err) crashingWal.next = csWal @@ -182,7 +195,7 @@ LOOP: cs.wal = crashingWal // start consensus state - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) i++ @@ -192,7 +205,7 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -278,19 +291,19 @@ func (w *crashingWAL) SearchForEndHeight( return w.next.SearchForEndHeight(height, options) } -func (w *crashingWAL) Start() error { return w.next.Start() } -func (w *crashingWAL) Stop() error { return w.next.Stop() } -func (w *crashingWAL) Wait() { w.next.Wait() } +func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) } +func (w *crashingWAL) Stop() error { return w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ type simulatorTestSuite struct { GenesisState sm.State - Config *cfg.Config + Config *config.Config Chain []*types.Block Commits []*types.Commit CleanupFunc cleanupFunc - Mempool mempl.Mempool + Mempool mempool.Mempool Evpool sm.EvidencePool } @@ -308,9 +321,9 @@ const ( var modes = []uint{0, 1, 2, 3} // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay -func setupSimulator(t *testing.T) *simulatorTestSuite { +func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { t.Helper() - config := configSetup(t) + cfg := configSetup(t) sim := &simulatorTestSuite{ Mempool: emptyMempool{}, @@ -320,22 +333,23 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { nPeers := 7 nVals := 4 - css, genDoc, 
config, cleanup := randConsensusNetWithPeers( + css, genDoc, cfg, cleanup := randConsensusNetWithPeers( + ctx, t, - config, + cfg, nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath) - sim.Config = config + sim.Config = cfg sim.GenesisState, _ = sm.MakeGenesisState(genDoc) sim.CleanupFunc = cleanup partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { @@ -344,13 +358,13 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height, round := css[0].Height, css[0].Round // start the machine - startTestRound(css[0], height, round) + startTestRound(ctx, css[0], height, round) incrementHeight(vss...) ensureNewRound(t, newRoundCh, height, 0) ensureNewProposal(t, proposalCh, height, round) rs := css[0].GetRoundState() - signAddVotes(css[0], tmproto.PrecommitType, sim.Config.ChainID(), + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) @@ -359,12 +373,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 2 height++ incrementHeight(vss...) - newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) @@ -372,7 +386,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal := types.NewProposal(vss[1].Height, round, -1, blockID) p := proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -383,7 +397,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { } ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], tmproto.PrecommitType, sim.Config.ChainID(), + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) ensureNewRound(t, newRoundCh, height+1, 0) @@ -391,12 +405,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 3 height++ incrementHeight(vss...) 
- updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -404,7 +418,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[2].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -415,7 +429,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { } ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], tmproto.PrecommitType, sim.Config.ChainID(), + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) ensureNewRound(t, newRoundCh, height+1, 0) @@ -423,19 +437,19 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 4 height++ incrementHeight(vss...) - newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) - newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background()) + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -446,10 +460,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { valIndexFn := func(cssIdx int) int { for i, vs := range newVss { - vsPubKey, err := vs.GetPubKey(context.Background()) + vsPubKey, err := vs.GetPubKey(ctx) require.NoError(t, err) - cssPubKey, err := css[cssIdx].privValidator.GetPubKey(context.Background()) + cssPubKey, err := css[cssIdx].privValidator.GetPubKey(ctx) 
require.NoError(t, err) if vsPubKey.Equals(cssPubKey) { @@ -463,7 +477,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[3].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -475,7 +489,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { ensureNewProposal(t, proposalCh, height, round) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) rs = css[0].GetRoundState() @@ -483,7 +497,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(css[0], + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, newVss[i]) @@ -505,7 +519,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(css[0], + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, newVss[i]) @@ -516,7 +530,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -528,7 +542,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -543,7 +557,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(css[0], + signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, newVss[i]) @@ -565,57 +579,72 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, false) + testHandshakeReplay(ctx, t, sim, 0, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, true) + testHandshakeReplay(ctx, t, sim, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, 
sim, 2, m, false) + testHandshakeReplay(ctx, t, sim, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, true) + testHandshakeReplay(ctx, t, sim, 2, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, true) } } // Sync from caught up func TestHandshakeReplayNone(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks, m, true) } } // Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx func TestMockProxyApp(t *testing.T) { - sim := setupSimulator(t) // setup config and simulator - config := sim.Config - assert.NotNil(t, config) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) // setup config and simulator + cfg := sim.Config + assert.NotNil(t, cfg) logger := log.TestingLogger() var validTxs, invalidTxs = 0, 0 @@ -635,7 +664,7 @@ func TestMockProxyApp(t *testing.T) { err = proto.Unmarshal(bytes, loadedAbciRes) require.NoError(t, err) - mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) + mock := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes) abciRes := new(tmstate.ABCIResponses) abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) @@ -659,7 +688,7 @@ func TestMockProxyApp(t *testing.T) { mock.SetResponseCallback(proxyCb) someTx := []byte("tx") - _, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx}) + _, err = mock.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: someTx}) assert.NoError(t, err) }) assert.True(t, validTxs == 1) @@ -667,7 +696,7 @@ func TestMockProxyApp(t *testing.T) { } func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") + walFile, err := os.CreateTemp("", "wal") if err != nil { panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) } @@ -683,51 +712,60 @@ func tempWALWithData(data []byte) string { // Make some blocks. Start a fresh app and apply nBlocks blocks. 
// Then restart the app and sync it up with the remaining blocks -func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) { +func testHandshakeReplay( + ctx context.Context, + t *testing.T, + sim *simulatorTestSuite, + nBlocks int, + mode uint, + testValidatorsChange bool, +) { var chain []*types.Block var commits []*types.Commit var store *mockBlockStore var stateDB dbm.DB var genesisState sm.State + var cancel context.CancelFunc - config := sim.Config + ctx, cancel = context.WithCancel(ctx) + t.Cleanup(cancel) + cfg := sim.Config + + logger := log.TestingLogger() if testValidatorsChange { - testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() stateDB = dbm.NewMemDB() genesisState = sim.GenesisState - config = sim.Config + cfg = sim.Config chain = append([]*types.Block{}, sim.Chain...) // copy chain commits = sim.Commits - store = newMockBlockStore(config, genesisState.ConsensusParams) + store = newMockBlockStore(cfg, genesisState.ConsensusParams) } else { // test single node - testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() - walBody, err := WALWithNBlocks(t, numBlocks) + walBody, err := WALWithNBlocks(ctx, t, numBlocks) require.NoError(t, err) walFile := tempWALWithData(walBody) - config.Consensus.SetWalFile(walFile) + cfg.Consensus.SetWalFile(walFile) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) - wal, err := NewWAL(walFile) + wal, err := NewWAL(logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); wal.Wait() }) chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) + stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion) } stateStore := sm.NewStore(stateDB) @@ -736,24 +774,36 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) + state = buildTMStateFromChain( + ctx, + cfg, + logger, + sim.Mempool, + sim.Evpool, + stateStore, + state, + chain, + nBlocks, + mode, + store, + ) latestAppHash := state.AppHash // make a new client creator kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) + filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) - clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) + clientCreator2 := 
abciclient.NewLocalCreator(kvstoreApp) if nBlocks > 0 { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) - buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) + buildAppStateFromChain(ctx, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) } // Prune block store if requested @@ -766,20 +816,16 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2) - if err := proxyApp.Start(); err != nil { + genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) - err := handshaker.Handshake(proxyApp) + err := handshaker.Handshake(ctx, proxyApp) if expectError { require.Error(t, err) return @@ -788,7 +834,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } // get the latest app hash from the app - res, err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""}) + res, err := proxyApp.Query().InfoSync(ctx, abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -814,7 +860,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } func applyBlock(stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, @@ -832,24 +878,25 @@ func applyBlock(stateStore sm.Store, } func buildAppStateFromChain( + ctx context.Context, proxyApp proxy.AppConns, stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) { + blockStore *mockBlockStore, +) { // start a new app without handshake, play nBlocks blocks - if err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(ctx); err != nil { panic(err) } - defer proxyApp.Stop() //nolint:errcheck // ignore state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -881,30 +928,32 @@ func buildAppStateFromChain( } func buildTMStateFromChain( - config *cfg.Config, - mempool mempl.Mempool, + ctx context.Context, + cfg *config.Config, + logger log.Logger, + mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, state sm.State, chain []*types.Block, nBlocks int, mode uint, - blockStore 
*mockBlockStore) sm.State { + blockStore *mockBlockStore, +) sm.State { // run the whole chain against this client to build up the tendermint state kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) + filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() - clientCreator := proxy.NewLocalClientCreator(kvstoreApp) + clientCreator := abciclient.NewLocalCreator(kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator) - if err := proxyApp.Start(); err != nil { + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { panic(err) } - defer proxyApp.Stop() //nolint:errcheck state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -941,40 +990,43 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x01 // - 0x02 // - 0x03 - config := ResetConfig("handshake_test_") - t.Cleanup(func() { os.RemoveAll(config.RootDir) }) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := ResetConfig("handshake_test_") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(config, pubKey, appVersion) + stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks blocks := sf.MakeBlocks(3, &state, privVal) store.chain = blocks + logger := log.TestingLogger() + // 2. 
Tendermint must panic if app returns wrong hash for the first block // - RANDOM HASH // - 0x02 // - 0x03 { app := &badApp{numBlocks: 3, allHashesAreWrong: true} - clientCreator := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(clientCreator) - err := proxyApp.Start() + clientCreator := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } }) @@ -986,19 +1038,15 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - RANDOM HASH { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} - clientCreator := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(clientCreator) - err := proxyApp.Start() + clientCreator := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } }) @@ -1071,7 +1119,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // if its not the first one, we have a full block if thisBlockParts != nil { var pbb = new(tmproto.Block) - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } @@ -1110,7 +1158,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } } // grab the last block too - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } @@ -1156,14 +1204,14 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { // fresh state and mock store func stateAndStore( - config *cfg.Config, + cfg *config.Config, pubKey crypto.PubKey, appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) state.Version.Consensus.App = appVersion - store := newMockBlockStore(config, state.ConsensusParams) + store := newMockBlockStore(cfg, state.ConsensusParams) if err := stateStore.Save(state); err != nil { panic(err) } @@ -1174,7 +1222,7 @@ func stateAndStore( // mock block store type mockBlockStore struct { - config *cfg.Config + cfg *config.Config params types.ConsensusParams chain []*types.Block commits []*types.Commit @@ -1182,8 +1230,8 @@ type mockBlockStore struct { } // TODO: NewBlockStore(db.NewMemDB) ... 
-func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil, 0} +func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{cfg, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } @@ -1226,37 +1274,41 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { // Test handshake/init chain func TestHandshakeUpdatesValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + votePower := 10 + int64(rand.Uint32()) val, _ := factory.Validator(votePower) + vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} - clientCreator := proxy.NewLocalClientCreator(app) + clientCreator := abciclient.NewLocalCreator(app) - config := ResetConfig("handshake_test_") - t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) + cfg, err := ResetConfig("handshake_test_") + require.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(config, pubKey, 0x0) + stateDB, state, store := stateAndStore(cfg, pubKey, 0x0) stateStore := sm.NewStore(stateDB) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator) - if err := proxyApp.Start(); err != nil { + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) + + logger := log.TestingLogger() + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + + if err := handshaker.Handshake(ctx, proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } // reload the state, check the validator set was updated diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 2388f4d77..270a0b568 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -5,18 +5,20 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "os" "runtime/debug" "time" "github.com/gogo/protobuf/proto" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/libs/fail" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log" @@ -27,7 +29,6 @@ import ( "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -80,7 +81,7 @@ type State struct { service.BaseService // config details - config *cfg.ConsensusConfig + config *config.ConsensusConfig privValidator types.PrivValidator // for signing votes privValidatorType types.PrivValidatorType @@ -117,7 +118,7 @@ type State struct { // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket - eventBus *types.EventBus + eventBus *eventbus.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes @@ -152,7 +153,8 @@ type StateOption func(*State) // NewState returns a new State. func NewState( - config *cfg.ConsensusConfig, + logger log.Logger, + cfg *config.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, @@ -161,13 +163,13 @@ options ...StateOption, ) *State { cs := &State{ - config: config, + config: cfg, blockExec: blockExec, blockStore: blockStore, txNotifier: txNotifier, peerMsgQueue: make(chan msgInfo, msgQueueSize), internalMsgQueue: make(chan msgInfo, msgQueueSize), - timeoutTicker: NewTimeoutTicker(), + timeoutTicker: NewTimeoutTicker(logger), statsMsgQueue: make(chan msgInfo, msgQueueSize), done: make(chan struct{}), doWALCatchup: true, @@ -192,7 +194,7 @@ // NOTE: we do not call scheduleRound0 yet, we do that upon Start() - cs.BaseService = *service.NewBaseService(nil, "State", cs) + cs.BaseService = *service.NewBaseService(logger, "State", cs) for _, option := range options { option(cs) } @@ -200,14 +202,8 @@ return cs } -// SetLogger implements Service. -func (cs *State) SetLogger(l log.Logger) { - cs.BaseService.Logger = l - cs.timeoutTicker.SetLogger(l) -} - // SetEventBus sets event bus. -func (cs *State) SetEventBus(b *types.EventBus) { +func (cs *State) SetEventBus(b *eventbus.EventBus) { cs.eventBus = b cs.blockExec.SetEventBus(b) } @@ -241,8 +237,12 @@ func (cs *State) GetLastHeight() int64 { // GetRoundState returns a shallow copy of the internal consensus state. func (cs *State) GetRoundState() *cstypes.RoundState { cs.mtx.RLock() + defer cs.mtx.RUnlock() + + // NOTE: this might be dodgy, as RoundState contains a number of + // pointers and is explicitly not thread safe. rs := cs.RoundState // copy - cs.mtx.RUnlock() return &rs } @@ -329,11 +329,11 @@ func (cs *State) LoadCommit(height int64) *types.Commit { // OnStart loads the latest state via the WAL, and starts the timeout and // receive routines. -func (cs *State) OnStart() error { +func (cs *State) OnStart(ctx context.Context) error { // We may set the WAL in testing before calling Start, so only OpenWAL if it's // still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok { - if err := cs.loadWalFile(); err != nil { + if err := cs.loadWalFile(ctx); err != nil { return err } } @@ -384,13 +384,13 @@ cs.Logger.Info("successful WAL repair") // reload WAL file - if err := cs.loadWalFile(); err != nil { + if err := cs.loadWalFile(ctx); err != nil { return err } } } - if err := cs.evsw.Start(); err != nil { + if err := cs.evsw.Start(ctx); err != nil { return err } @@ -399,7 +399,7 @@ // NOTE: we will get a buildup of garbage goroutines // firing on the tockChan until the receiveRoutine is started // to deal with them (by that point, at most one will be valid) - if err := cs.timeoutTicker.Start(); err != nil { + if err := cs.timeoutTicker.Start(ctx); err != nil { return err } @@ -420,8 +420,8 @@ // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan // receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions -func (cs *State) startRoutines(maxSteps int) { - err := cs.timeoutTicker.Start() +func (cs *State) startRoutines(ctx context.Context, maxSteps int) { + err := cs.timeoutTicker.Start(ctx) if err != nil { cs.Logger.Error("failed to start timeout ticker", "err", err) return @@ -431,8 +431,8 @@ } // loadWalFile loads WAL data from file. It overwrites cs.wal. -func (cs *State) loadWalFile() error { - wal, err := cs.OpenWAL(cs.config.WalFile()) +func (cs *State) loadWalFile(ctx context.Context) error { + wal, err := cs.OpenWAL(ctx, cs.config.WalFile()) if err != nil { cs.Logger.Error("failed to load state WAL", "err", err) return err } @@ -457,11 +457,15 @@ func (cs *State) OnStop() { close(cs.onStopCh) if err := cs.evsw.Stop(); err != nil { - cs.Logger.Error("failed trying to stop eventSwitch", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.Logger.Error("failed trying to stop eventSwitch", "error", err) + } } if err := cs.timeoutTicker.Stop(); err != nil { - cs.Logger.Error("failed trying to stop timeoutTicker", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.Logger.Error("failed trying to stop timeoutTicker", "error", err) + } } // WAL is stopped in receiveRoutine. } @@ -475,16 +479,14 @@ func (cs *State) Wait() { // OpenWAL opens a file to log all consensus messages and timeouts for // deterministic accountability.
-func (cs *State) OpenWAL(walFile string) (WAL, error) { - wal, err := NewWAL(walFile) +func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) { + wal, err := NewWAL(cs.Logger.With("wal", walFile), walFile) if err != nil { cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) return nil, err } - wal.SetLogger(cs.Logger.With("wal", walFile)) - - if err := wal.Start(); err != nil { + if err := wal.Start(ctx); err != nil { cs.Logger.Error("failed to start WAL", "err", err) return nil, err } @@ -764,7 +766,9 @@ func (cs *State) receiveRoutine(maxSteps int) { // close wal now that we're done writing to it if err := cs.wal.Stop(); err != nil { - cs.Logger.Error("failed trying to stop WAL", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.Logger.Error("failed trying to stop WAL", "error", err) + } } cs.wal.Wait() @@ -1925,7 +1929,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 47e839185..c299dd244 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -13,7 +13,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - p2pmock "github.com/tendermint/tendermint/internal/p2p/mock" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -63,22 +63,25 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh // ProposeSuite func TestStateProposerSelection0(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) height, round := cs1.Height, cs1.Round - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // Wait for new round so proposer is set. ensureNewRound(t, newRoundCh, height, round) // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() - pv, err := cs1.privValidator.GetPubKey(context.Background()) + pv, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() if !bytes.Equal(prop.Address, address) { @@ -89,7 +92,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), }, vss[1:]...) 
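Note on the pattern running through these changes: services now take a context.Context in Start, loggers are injected through constructors instead of a post-hoc SetLogger, and tests tear a service down by canceling the context and waiting for it to exit rather than calling Stop. Below is a minimal, self-contained Go sketch of that lifecycle under those assumptions; demoService is hypothetical and merely mimics the shape of the services touched here (proxy app connections, the timeout ticker, the event switch), not Tendermint's actual service package.

package main

import (
	"context"
	"fmt"
)

// demoService mimics the Start(ctx)/Wait shape these tests migrate to.
type demoService struct {
	done chan struct{}
}

// Start launches the run loop; the service stops when ctx is canceled,
// which replaces the old explicit Stop method.
func (s *demoService) Start(ctx context.Context) error {
	go func() {
		defer close(s.done)
		<-ctx.Done()
	}()
	return nil
}

// Wait blocks until the run loop has fully exited.
func (s *demoService) Wait() { <-s.done }

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	svc := &demoService{done: make(chan struct{})}
	if err := svc.Start(ctx); err != nil {
		panic(err)
	}

	// In the tests this pair becomes t.Cleanup(func() { cancel(); svc.Wait() }).
	cancel()
	svc.Wait()
	fmt.Println("stopped cleanly")
}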
@@ -98,7 +101,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewRound(t, newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() - pv1, err := vss[1].GetPubKey(context.Background()) + pv1, err := vss[1].GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() if !bytes.Equal(prop.Address, addr) { @@ -110,23 +113,28 @@ func TestStateProposerSelection2(t *testing.T) { config := configSetup(t) - cs1, vss := makeState(config, 4) // test needs more work for more than 3 validators + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) // test needs more work for more than 3 validators + require.NoError(t, err) + height := cs1.Height - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) incrementRound(vss[1:]...) var round int32 = 2 - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := int32(0); int(i) < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - pvk, err := vss[int(i+round)%len(vss)].GetPubKey(context.Background()) + pvk, err := vss[int(i+round)%len(vss)].GetPubKey(ctx) require.NoError(t, err) addr := pvk.Address() correctProposer := addr @@ -137,7 +145,7 @@ prop.Address)) } - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) ensureNewRound(t, newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -147,15 +155,18 @@ // a non-validator should time out into the prevote round func TestStateEnterProposeNoPrivValidator(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _ := makeState(config, 1) + cs, _, err := makeState(ctx, config, log.TestingLogger(), 1) + require.NoError(t, err) cs.SetPrivValidator(nil) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) // if we're not a validator, EnterPropose should time out ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) @@ -168,17 +179,20 @@ // a validator should not time out of the prevote round (TODO: unless the block is really big!)
func TestStateEnterProposeYesPrivValidator(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _ := makeState(config, 1) + cs, _, err := makeState(ctx, config, log.TestingLogger(), 1) + require.NoError(t, err) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) cs.enterNewRound(height, round) - cs.startRoutines(3) + cs.startRoutines(ctx, 3) ensureNewProposal(t, proposalCh, height, round) @@ -200,15 +214,18 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { func TestStateBadProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 2) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 2) + require.NoError(t, err) height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) @@ -227,7 +244,7 @@ func TestStateBadProposal(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(vs2.Height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vs2.SignProposal(ctx, config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } @@ -239,37 +256,40 @@ func TestStateBadProposal(t *testing.T) { } // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // wait for proposal ensureProposal(t, proposalCh, height, round, blockID) // wait for prevote ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) ensurePrevote(t, voteCh, height, round) // wait for precommit ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } func TestStateOversizedBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 2) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 2) + require.NoError(t, err) cs1.state.ConsensusParams.Block.MaxBytes = 2000 height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + timeoutProposeCh 
:= subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} @@ -283,7 +303,7 @@ func TestStateOversizedBlock(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vs2.SignProposal(ctx, config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -299,7 +319,7 @@ func TestStateOversizedBlock(t *testing.T) { } // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) @@ -310,12 +330,12 @@ func TestStateOversizedBlock(t *testing.T) { // and then should send nil prevote and precommit regardless of whether other validators prevote and // precommit on it ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) ensurePrevote(t, voteCh, height, round) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } //---------------------------------------------------------------------------------------------------- @@ -324,8 +344,12 @@ func TestStateOversizedBlock(t *testing.T) { // propose, prevote, and precommit a block func TestStateFullRound1(t *testing.T) { config := configSetup(t) + logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := makeState(config, 1) + cs, vss, err := makeState(ctx, config, log.TestingLogger(), 1) + require.NoError(t, err) height, round := cs.Height, cs.Round // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit @@ -333,19 +357,19 @@ func TestStateFullRound1(t *testing.T) { if err := cs.eventBus.Stop(); err != nil { t.Error(err) } - eventBus := types.NewEventBusWithBufferCapacity(0) - eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + cs.SetEventBus(eventBus) - if err := eventBus.Start(); err != nil { + if err := eventBus.Start(ctx); err != nil { t.Error(err) } - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) + propCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) ensureNewRound(t, newRoundCh, height, round) @@ -353,49 +377,55 @@ func TestStateFullRound1(t *testing.T) { propBlockHash := 
cs.GetRoundState().ProposalBlock.Hash() ensurePrevote(t, voteCh, height, round) // wait for prevote - validatePrevote(t, cs, round, vss[0], propBlockHash) + validatePrevote(ctx, t, cs, round, vss[0], propBlockHash) ensurePrecommit(t, voteCh, height, round) // wait for precommit // we're going to roll right into new height ensureNewRound(t, newRoundCh, height+1, 0) - validateLastPrecommit(t, cs, vss[0], propBlockHash) + validateLastPrecommit(ctx, t, cs, vss[0], propBlockHash) } // nil is proposed, so prevote and precommit nil func TestStateFullRoundNil(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := makeState(config, 1) + cs, vss, err := makeState(ctx, config, log.TestingLogger(), 1) + require.NoError(t, err) height, round := cs.Height, cs.Round - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) - cs.startRoutines(4) + cs.startRoutines(ctx, 4) ensurePrevote(t, voteCh, height, round) // prevote ensurePrecommit(t, voteCh, height, round) // precommit // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) + validatePrevoteAndPrecommit(ctx, t, cs, round, -1, vss[0], nil, nil) } // run through propose, prevote, precommit, commit with two validators // where the first validator has to wait for votes from the second func TestStateFullRound2(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 2) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 2) + require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensurePrevote(t, voteCh, height, round) // prevote @@ -404,17 +434,17 @@ rs := cs1.GetRoundState() blockID := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} // prevote arrives from vs2: - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) ensurePrevote(t, voteCh, height, round) // prevote ensurePrecommit(t, voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, 0, 0, vss[0], blockID.Hash, blockID.Hash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) ensurePrecommit(t, voteCh, height, round) // wait to finish commit, propose in next height @@ -428,18 +458,21 @@ // two vals take turns proposing.
val1 locks on first one, precommits nil on everything else func TestStateLock_NoPOL(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 2) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 2) + require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 @@ -447,7 +480,7 @@ // start round and wait for prevote cs1.enterNewRound(height, round) - cs1.startRoutines(0) + cs1.startRoutines(ctx, 0) ensureNewRound(t, newRoundCh, height, round) @@ -462,20 +495,20 @@ // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2) ensurePrevote(t, voteCh, height, round) // prevote - validatePrevote(t, cs1, round, vss[0], initialBlockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], initialBlockID.Hash) // the proposed block should now be locked and our precommit added ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // we should now be stuck in limbo forever, waiting for more precommits // let's add one for a different block hash := make([]byte, len(initialBlockID.Hash)) copy(hash, initialBlockID.Hash) hash[0] = (hash[0] + 1) % 255 - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ Hash: hash, PartSetHeader: initialBlockID.PartSetHeader, }, vs2) @@ -507,11 +540,11 @@ // we should have prevoted nil since we did not see a proposal in the round. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add a conflicting prevote from the other validator conflictingBlockID := types.BlockID{Hash: hash, PartSetHeader: rs.LockedBlock.MakePartSet(partSize).Header()} - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), conflictingBlockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), conflictingBlockID, vs2) ensurePrevote(t, voteCh, height, round) // now we're going to enter prevote again, but with invalid args @@ -521,10 +554,10 @@ // the proposed block should still be the locked block.
// we should precommit nil and be locked on the proposal. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // add conflicting precommit from vs2 - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), conflictingBlockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), conflictingBlockID, vs2) ensurePrecommit(t, voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args @@ -552,18 +585,19 @@ func TestStateLock_NoPOL(t *testing.T) { } ensurePrevote(t, voteCh, height, round) // prevote - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash()) newBlockID := types.BlockID{Hash: hash, PartSetHeader: rs.ProposalBlock.MakePartSet(partSize).Header()} - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), newBlockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), newBlockID, vs2) ensurePrevote(t, voteCh, height, round) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(t, voteCh, height, round) // precommit - validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but be locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but be locked on proposal signAddVotes( + ctx, cs1, tmproto.PrecommitType, config.ChainID(), @@ -574,9 +608,10 @@ func TestStateLock_NoPOL(t *testing.T) { ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - cs2, _ := makeState(config, 2) // needed so generated block is different than locked block + cs2, _, err := makeState(ctx, config, log.TestingLogger(), 2) // needed so generated block is different than locked block + require.NoError(t, err) // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round+1) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round+1) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with vs2") } @@ -604,17 +639,18 @@ func TestStateLock_NoPOL(t *testing.T) { // prevote for nil since we did not see a proposal for our locked block in the round. 
ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, 3, vss[0], nil) + validatePrevote(ctx, t, cs1, 3, vss[0], nil) // prevote for proposed block - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), propBlockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID, vs2) ensurePrevote(t, voteCh, height, round) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but locked on proposal signAddVotes( + ctx, cs1, tmproto.PrecommitType, config.ChainID(), @@ -632,20 +668,25 @@ func TestStateLock_NoPOL(t *testing.T) { func TestStateLock_POLUpdateLock(t *testing.T) { config := configSetup(t) - cs1, vss := makeState(config, 4) + logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss, err := makeState(ctx, config, logger, 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: @@ -657,7 +698,7 @@ func TestStateLock_POLUpdateLock(t *testing.T) { */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -669,17 +710,17 @@ func TestStateLock_POLUpdateLock(t *testing.T) { ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(t, lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -697,8 +738,9 @@ func TestStateLock_POLUpdateLock(t *testing.T) { round++ // Generate a new proposal block. 
- cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - propR1, propBlockR1 := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round) + cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockR1Parts := propBlockR1.MakePartSet(partSize) propBlockR1Hash := propBlockR1.Hash() r1BlockID := types.BlockID{ @@ -717,10 +759,10 @@ // Prevote nil since the proposal does not match our locked block. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // Add prevotes from the remainder of the validators for the new locked block. - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) // Check that we lock on a new block. ensureLock(t, lockCh, height, round) @@ -729,7 +771,7 @@ // We should now be locked on the new block and precommit it, since we saw a sufficient number of // prevotes for the block. - validatePrecommit(t, cs1, round, round, vss[0], propBlockR1Hash, propBlockR1Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockR1Hash, propBlockR1Hash) } // TestStateLock_POLRelock tests that a validator updates its locked round if @@ -737,20 +779,23 @@ // for a block that it is already locked in. func TestStateLock_POLRelock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - relockCh := subscribe(cs1.eventBus, types.EventQueryRelock) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + relockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryRelock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: @@ -762,7 +807,7 @@ */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -776,17 +821,17 @@ ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // check that the validator generates a Lock event.
ensureLock(t, lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -819,10 +864,10 @@ // Prevote our locked block since it matches the proposal seen in this round. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) // Add prevotes from the remainder of the validators for the locked block. - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // Check that we relock. ensureRelock(t, relockCh, height, round) @@ -830,26 +875,29 @@ ensurePrecommit(t, voteCh, height, round) // We should now be locked on the same block but with an updated locked round. - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) } // TestStateLock_PrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil // if it is locked on a block and misses the proposal in a round.
func TestStateLock_PrevoteNilWhenLockedAndMissProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: @@ -861,7 +909,7 @@ */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -873,17 +921,17 @@ ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(t, lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -904,19 +952,21 @@ // Prevote nil. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // Add nil prevotes from the remainder of the validators. - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // We should still be locked on the same block and precommit nil. - validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) } // TestStateLock_PrevoteNilWhenLockedAndDifferentProposal tests that a validator prevotes nil // if it is locked on one block and a different block is proposed in a round.
func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() /* All of the assertions in this test occur on the `cs1` validator. The test sends signed votes from the other validators to cs1 and @@ -924,18 +974,19 @@ func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { state. */ - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: @@ -946,7 +997,7 @@ func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { This ensures that cs1 will lock on B in this round but not precommit it. */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -958,17 +1009,17 @@ func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(t, lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round. 
ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -985,8 +1036,9 @@ t.Log("### Starting Round 1") incrementRound(vs2, vs3, vs4) round++ - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - propR1, propBlockR1 := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round) + cs2, err := newState(ctx, log.TestingLogger(), cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockR1Parts := propBlockR1.MakePartSet(types.BlockPartSizeBytes) propBlockR1Hash := propBlockR1.Hash() require.NotEqual(t, propBlockR1Hash, blockID.Hash) @@ -999,14 +1051,14 @@ // Prevote nil. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // Add prevotes from the remainder of the validators for nil. - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // We should remain locked on the same block but precommit nil. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) } // TestStateLock_POLDoesNotUnlock tests that a validator maintains its locked block @@ -1016,25 +1068,29 @@ // that it has been completely removed. func TestStateLock_POLDoesNotUnlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() /* All of the assertions in this test occur on the `cs1` validator. The test sends signed votes from the other validators to cs1 and cs1's state is then examined to verify that it now matches the expected state.
*/ + logger := log.TestingLogger() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, logger, 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) /* Round 0: @@ -1047,7 +1103,7 @@ t.Log("#### ONTO ROUND 0") // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1058,9 +1114,9 @@ } ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // the validator should have locked a block in this round. ensureLock(t, lockCh, height, round) @@ -1068,15 +1124,15 @@ ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit should be for this locked block. - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // Add precommits from the other validators. // We only issue 1/2 Precommits for the block in this round: with four equally // weighted validators, committing requires more than 2/3, i.e. at least three // precommits for the block, and cs1 plus vs3 supply only two. // This ensures that the validator being tested does not commit the block. // We do not want the validator to commit the block because we want the // test to proceed to the next consensus round.
- signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // timeout to new round ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1090,8 +1146,9 @@ t.Log("#### ONTO ROUND 1") round++ incrementRound(vs2, vs3, vs4) - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round) + cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockParts := propBlock.MakePartSet(types.BlockPartSizeBytes) require.NotEqual(t, propBlock.Hash(), blockID.Hash) if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, ""); err != nil { t.Fatal(err) @@ -1104,17 +1161,17 @@ // Prevote for nil since the proposed block does not match our locked block. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add >2/3 prevotes for nil from all other validators - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // verify that we haven't updated our locked block since the first round - validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /* @@ -1126,8 +1183,9 @@ t.Log("#### ONTO ROUND 2") round++ incrementRound(vs2, vs3, vs4) - cs3 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock = decideProposal(t, cs3, vs3, vs3.Height, vs3.Round) + cs3, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + prop, propBlock = decideProposal(ctx, t, cs3, vs3, vs3.Height, vs3.Round) propBlockParts = propBlock.MakePartSet(types.BlockPartSizeBytes) if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, ""); err != nil { t.Fatal(err) @@ -1136,18 +1194,19 @@ ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) - // Prevote for nil since the proposal does not match our locked block.
ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // verify that we haven't updated our locked block since the first round - validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) } // TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock tests that observing @@ -1155,20 +1214,24 @@ // new block if a proposal was not seen for that block. func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { config := configSetup(t) + logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: cs1 creates a proposal for block B. @@ -1178,7 +1241,7 @@ This ensures that cs1 will lock on B in this round but not precommit it.
*/ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1190,14 +1253,14 @@ func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { ensurePrevote(t, voteCh, height, round) // prevote - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // our precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], firstBlockID.Hash, firstBlockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], firstBlockID.Hash, firstBlockID.Hash) // add precommits from the rest - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1212,8 +1275,9 @@ func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { t.Log("### Starting Round 1") incrementRound(vs2, vs3, vs4) round++ - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round) + cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with vs2") } @@ -1227,13 +1291,13 @@ func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { // prevote for nil since the proposal was not seen. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // now lets add prevotes from everyone else for the new block - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), secondBlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), secondBlockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, firstBlockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, firstBlockID.Hash) } // TestStateLock_DoesNotLockOnOldProposal tests that observing @@ -1242,18 +1306,21 @@ func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { // was seen in a previous round. 
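Every test in this file now repeats the same context-scoped setup, so the shape is worth spelling out once. A schematic of the pattern, using the helper names from this diff (the test name and body here are illustrative, not code from the repository):

    func TestAnySchematicConsensusTest(t *testing.T) {
        config := configSetup(t)
        // Cancelling the context in teardown stops every goroutine the
        // helpers spawn on the test's behalf.
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4)
        require.NoError(t, err)
        _ = vss

        // Subscriptions are bound to the same ctx, so their pump
        // goroutines exit when the test finishes.
        newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
        startTestRound(ctx, cs1, cs1.Height, cs1.Round)
        ensureNewRound(t, newRoundCh, cs1.Height, cs1.Round)
    }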
func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: cs1 creates a proposal for block B. @@ -1263,7 +1330,7 @@ func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { This ensures that cs1 will not lock on B. */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1275,13 +1342,13 @@ func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // The proposed block should not have been locked. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) @@ -1302,14 +1369,14 @@ func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { ensureNewRound(t, newRoundCh, height, round) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // All validators prevote for the old block. - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) // Make sure that cs1 did not lock on the block since it did not receive a proposal for it. 
ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) } // 4 vals @@ -1318,24 +1385,28 @@ func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { // then we see the polka from round 1 but shouldn't unlock func TestStateLock_POLSafety1(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.TestingLogger() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, logger, 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) + startTestRound(ctx, cs1, cs1.Height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1343,15 +1414,15 @@ func TestStateLock_POLSafety1(t *testing.T) { propBlock := rs.ProposalBlock ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) + validatePrevote(ctx, t, cs1, round, vss[0], propBlock.Hash()) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlock.MakePartSet(partSize).Header()} // the others sign a polka but we don't see it - prevotes := signVotes(tmproto.PrevoteType, config.ChainID(), + prevotes := signVotes(ctx, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) // we do see them precommit nil - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // cs1 precommit nil ensurePrecommit(t, voteCh, height, round) @@ -1360,8 +1431,9 @@ func TestStateLock_POLSafety1(t *testing.T) { t.Log("### ONTO ROUND 1") incrementRound(vs2, vs3, vs4) round++ // moving to the next round - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock := decideProposal(t, cs2, vs2, vs2.Height, vs2.Round) + cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) propBlockParts := propBlock.MakePartSet(partSize) r2BlockID := types.BlockID{ Hash: propBlock.Hash(), @@ -1389,16 +1461,15 @@ func TestStateLock_POLSafety1(t *testing.T) { // go to prevote, prevote for proposal block ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], r2BlockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], r2BlockID.Hash) // now we see the others prevote for it, 
so we should lock on it - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), r2BlockID, vs2, vs3, vs4) - + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), r2BlockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], r2BlockID.Hash, r2BlockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r2BlockID.Hash, r2BlockID.Hash) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1418,9 +1489,9 @@ func TestStateLock_POLSafety1(t *testing.T) { // finish prevote ensurePrevote(t, voteCh, height, round) // we should prevote for nil - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + newStepCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRoundStep) // before prevotes from the previous round are added // add prevotes from the earlier round @@ -1438,33 +1509,36 @@ func TestStateLock_POLSafety1(t *testing.T) { // dont see P0, lock on P1 at R1, dont unlock using P0 at R2 func TestStateLock_POLSafety2(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) - _, propBlock0 := decideProposal(t, cs1, vss[0], height, round) + _, propBlock0 := decideProposal(ctx, t, cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(tmproto.PrevoteType, config.ChainID(), propBlockID0, vs2, vs3, vs4) + prevotes := signVotes(ctx, tmproto.PrevoteType, config.ChainID(), propBlockID0, vs2, vs3, vs4) // the block for round 1 - prop1, propBlock1 := decideProposal(t, cs1, vs2, vs2.Height, vs2.Round+1) + prop1, propBlock1 := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) propBlockParts1 := propBlock1.MakePartSet(partSize) propBlockID1 := types.BlockID{Hash: propBlock1.Hash(), PartSetHeader: propBlockParts1.Header()} @@ -1473,7 +1547,7 @@ func TestStateLock_POLSafety2(t *testing.T) { round++ // moving to the next round t.Log("### ONTO Round 1") // jump in at round 1 - 
startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { @@ -1482,17 +1556,17 @@ func TestStateLock_POLSafety2(t *testing.T) { ensureNewProposal(t, proposalCh, height, round) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockID1.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], propBlockID1.Hash) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), propBlockID1, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID1, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlockID1.Hash, propBlockID1.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockID1.Hash, propBlockID1.Hash) // add precommits from the rest - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), propBlockID1, vs3) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), propBlockID1, vs3) incrementRound(vs2, vs3, vs4) @@ -1503,7 +1577,7 @@ func TestStateLock_POLSafety2(t *testing.T) { // in round 2 we see the polkad block from round 0 newProp := types.NewProposal(height, round, 0, propBlockID0) p := newProp.ToProto() - if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vs3.SignProposal(ctx, config.ChainID(), p); err != nil { t.Fatal(err) } @@ -1524,7 +1598,7 @@ func TestStateLock_POLSafety2(t *testing.T) { ensureNewProposal(t, proposalCh, height, round) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockID1.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], propBlockID1.Hash) } @@ -1533,21 +1607,25 @@ func TestStateLock_POLSafety2(t *testing.T) { // it is not locked on in a previous round. 
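The next test exercises the prevote rule that lets a locked validator switch blocks. Schematically, per the Tendermint consensus algorithm (this helper is an illustration of the rule, not the state machine's actual code):

    // With a valid proposal whose POLRound vr carries a +2/3 prevote polka,
    // a validator prevotes the proposal when vr is at least its locked round
    // or the proposal equals its locked value; otherwise it prevotes nil.
    func shouldPrevoteProposal(lockedRound, polRound int32, matchesLock bool) bool {
        return matchesLock || polRound >= lockedRound
    }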
func TestState_PrevotePOLFromPreviousRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + logger := log.TestingLogger() + cs1, vss, err := makeState(ctx, config, logger, 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - lockCh := subscribe(cs1.eventBus, types.EventQueryLock) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round 0: @@ -1559,7 +1637,7 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { */ t.Log("### Starting Round 0") - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1571,17 +1649,17 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), r0BlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), r0BlockID, vs2, vs3, vs4) // check that the validator generates a Lock event. ensureLock(t, lockCh, height, round) // the proposed block should now be locked and our precommit added. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], r0BlockID.Hash, r0BlockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r0BlockID.Hash, r0BlockID.Hash) // add precommits from the rest of the validators. - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round. ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1599,9 +1677,10 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { incrementRound(vs2, vs3, vs4) round++ // Generate a new proposal block. 
- cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) + cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication()) + assert.NoError(t, err) cs2.ValidRound = 1 - propR1, propBlockR1 := decideProposal(t, cs2, vs2, vs2.Height, round) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, round) t.Log(propR1.POLRound) propBlockR1Parts := propBlockR1.MakePartSet(partSize) r1BlockID := types.BlockID{ @@ -1612,12 +1691,12 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { ensureNewRound(t, newRoundCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) @@ -1657,14 +1736,14 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { // We should now prevote this block, despite being locked on the block from // round 0. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], r1BlockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], r1BlockID.Hash) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // cs1 did not receive a POL within this round, so it should remain locked // on the block from round 0. ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, r0BlockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, r0BlockID.Hash) } // 4 vals. @@ -1674,24 +1753,27 @@ func TestState_PrevotePOLFromPreviousRound(t *testing.T) { // P0 proposes B0 at R3. 
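TestProposeValidBlock, below, checks the valid-block rule: a +2/3 prevote polka marks a block as valid even when the validator cannot lock on it, and the proposer of a later round must re-propose that block instead of creating a fresh one. A compressed sketch of the proposer-side choice (illustrative only):

    // Prefer the known-valid block when one has been recorded.
    func proposalBlock(validBlock, freshBlock *types.Block) *types.Block {
        if validBlock != nil {
            // B0 was marked valid in an earlier round, so round 3's
            // proposer offers it again.
            return validBlock
        }
        return freshBlock
    }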
func TestProposeValidBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) + startTestRound(ctx, cs1, cs1.Height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1703,17 +1785,17 @@ func TestProposeValidBlock(t *testing.T) { } ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) // the others sign a polka - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // we should have precommitted the proposed block in this round. - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1728,19 +1810,19 @@ func TestProposeValidBlock(t *testing.T) { // We did not see a valid proposal within this round, so prevote nil. ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // we should have precommitted nil during this round because we received // >2/3 precommits for nil from the other validators. 
- validatePrecommit(t, cs1, round, 0, vss[0], nil, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round += 2 // increment by multiple rounds @@ -1766,24 +1848,27 @@ func TestProposeValidBlock(t *testing.T) { // P0 miss to lock B but set valid block to B after receiving delayed prevote. func TestSetValidBlockOnDelayedPrevote(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) + startTestRound(ctx, cs1, cs1.Height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -1795,19 +1880,19 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { } ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) // vs2 send prevote for propBlock - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) // vs3 send prevote nil - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs3) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs3) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) rs = cs1.GetRoundState() @@ -1816,7 +1901,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { assert.True(t, rs.ValidRound == -1) // vs2 send (delayed) prevote for propBlock - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs4) ensureNewValidBlock(t, validBlockCh, height, round) @@ -1832,48 +1917,51 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { // receiving delayed Block Proposal. 
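The ensureNewTimeout assertions in these tests derive their wait from the consensus config, where each per-round timeout grows linearly with the round number. A sketch of the relationship that cs1.config.Propose(round) and cs1.config.Precommit(round) encode (parameter names are assumptions mirroring the config fields, not the exact defaults):

    // timeout(round) = base + round*delta, so later rounds wait longer.
    func roundTimeout(base, delta time.Duration, round int32) time.Duration {
        return base + time.Duration(round)*delta
    }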
func TestSetValidBlockOnDelayedProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) - startTestRound(cs1, cs1.Height, round) + startTestRound(ctx, cs1, cs1.Height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - prop, propBlock := decideProposal(t, cs1, vs2, vs2.Height, vs2.Round+1) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) blockID := types.BlockID{ Hash: propBlock.Hash(), PartSetHeader: propBlock.MakePartSet(partSize).Header(), } // vs2, vs3 and vs4 send prevote for propBlock - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) ensureNewValidBlock(t, validBlockCh, height, round) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "some peer"); err != nil { t.Fatal(err) @@ -1892,19 +1980,23 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { // P0 waits for timeoutPrecommit before starting next round func TestWaitingTimeoutOnNilPolka(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) - cs1, vss := makeState(config, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // 
start round - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(t, newRoundCh, height, round+1) @@ -1915,26 +2007,29 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { // P0 waits for timeoutPropose in the next round before entering prevote func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(t, newRoundCh, height, round) @@ -1945,7 +2040,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) } // 4 vals, 3 Precommits for nil from the higher round. 
@@ -1953,32 +2048,35 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { // P0 jump to higher round, precommit and start precommit wait func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(t, newRoundCh, height, round) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1991,37 +2089,43 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { // P0 wait for timeoutPropose to expire before sending prevote. func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round in which PO is not proposer - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) } // What we want: // P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) @@ -2029,21 +2133,21 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) - _, propBlock := decideProposal(t, cs1, vs2, vs2.Height, vs2.Round) + _, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) blockID := types.BlockID{ Hash: propBlock.Hash(), PartSetHeader: propBlock.MakePartSet(partSize).Header(), } // start round in which PO is not proposer - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() @@ -2058,29 +2162,32 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { // After receiving block, it executes block and moves to the next height. 
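The two tests around this point pin down commit behavior when votes outrun block propagation: a node that sees +2/3 precommits for a block it has not received records the block ID, emits NewValidBlock, and commits only once the matching parts arrive. A schematic of that gate (illustrative, not the package's actual code):

    // Commit only when the full block matching the +2/3 BlockID is in hand.
    func readyToCommit(maj23 types.BlockID, parts *types.PartSet) bool {
        return parts != nil && parts.HasHeader(maj23.PartSetHeader) && parts.IsComplete()
    }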
func TestCommitFromPreviousRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - prop, propBlock := decideProposal(t, cs1, vs2, vs2.Height, vs2.Round) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) blockID := types.BlockID{ Hash: propBlock.Hash(), PartSetHeader: propBlock.MakePartSet(partSize).Header(), } // start round in which PO is not proposer - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock for the previous round - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) ensureNewValidBlock(t, validBlockCh, height, round) @@ -2115,27 +2222,30 @@ func (n *fakeTxNotifier) Notify() { // start of the next round func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config.Consensus.SkipTimeoutCommit = false - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -2146,17 +2256,17 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { } ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + 
validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // wait till timeout occurs ensurePrecommitTimeout(t, precommitTimeoutCh) @@ -2164,7 +2274,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { ensureNewRound(t, newRoundCh, height, round+1) // majority is now reached - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) @@ -2180,26 +2290,29 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config.Consensus.SkipTimeoutCommit = false - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -2210,21 +2323,21 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { } ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], blockID.Hash) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) - 
signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) - prop, propBlock := decideProposal(t, cs1, vs2, height+1, 0) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, height+1, 0) propBlockParts := propBlock.MakePartSet(partSize) if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { @@ -2249,13 +2362,13 @@ func TestStateSlashing_Prevotes(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) + startTestRound(ctx, cs1, cs1.Height, 0) <-newRoundCh re := <-proposalCh <-voteCh // prevote @@ -2266,8 +2379,8 @@ func TestStateSlashing_Prevotes(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{Hash: hash, PartSetHeader: rs.ProposalBlockParts.Header()}, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{Hash: hash, PartSetHeader: rs.ProposalBlockParts.Header()}, vs2) // XXX: Check for existence of Dupeout info } @@ -2277,24 +2390,24 @@ func TestStateSlashing_Precommits(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(ctx, t, cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) + startTestRound(ctx, cs1, cs1.Height, 0) <-newRoundCh re := <-proposalCh <-voteCh // prevote // add prevote from vs2, then wait for more prevotes - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vs2) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vs2) // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{Hash: hash, PartSetHeader: rs.ProposalBlockParts.Header()}, vs2) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{Hash: hash, PartSetHeader: rs.ProposalBlockParts.Header()}, vs2)
// XXX: Check for existence of Dupeout info } @@ -2310,23 +2423,26 @@ func TestStateSlashing_Precommits(t *testing.T) { // we receive a final precommit after going into next round, but others might have gone to commit already! func TestStateHalt1(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := makeState(config, 4) + cs1, vss, err := makeState(ctx, config, log.TestingLogger(), 4) + require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) @@ -2339,17 +2455,17 @@ func TestStateHalt1(t *testing.T) { ensurePrevote(t, voteCh, height, round) - signAddVotes(cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + signAddVotes(ctx, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) // didnt receive proposal - signAddVotes(cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) // didn't receive proposal + signAddVotes(ctx, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! - precommit4 := signVote(vs4, tmproto.PrecommitType, config.ChainID(), blockID) + precommit4 := signVote(ctx, vs4, tmproto.PrecommitType, config.ChainID(), blockID) incrementRound(vs2, vs3, vs4) @@ -2368,7 +2484,7 @@ func TestStateHalt1(t *testing.T) { // prevote for nil since we did not receive a proposal in this round.
ensurePrevote(t, voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // now we receive the precommit from the previous round addVotes(cs1, precommit4) @@ -2381,10 +2497,14 @@ func TestStateHalt1(t *testing.T) { func TestStateOutputsBlockPartsStats(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // create dummy peer - cs, _ := makeState(config, 1) - peer := p2pmock.NewPeer(nil) + cs, _, err := makeState(ctx, config, log.TestingLogger(), 1) + require.NoError(t, err) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) // 1) new block part parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) @@ -2395,26 +2515,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{msg, "peer2"}) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) select { case <-cs.statsMsgQueue: @@ -2426,33 +2546,37 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { func TestStateOutputVoteStats(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := makeState(config, 2) + cs, vss, err := makeState(ctx, config, log.TestingLogger(), 2) + require.NoError(t, err) // create dummy peer - peer := p2pmock.NewPeer(nil) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) blockID := types.BlockID{ Hash: randBytes, } - vote := signVote(vss[1], tmproto.PrecommitType, config.ChainID(), blockID) + vote := signVote(ctx, vss[1], tmproto.PrecommitType, config.ChainID(), blockID) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peer.ID()}) + cs.handleMsg(msgInfo{voteMessage, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], tmproto.PrecommitType, config.ChainID(), blockID) + vote = signVote(ctx, vss[1], tmproto.PrecommitType, config.ChainID(), blockID) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) + cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID}) select { case <-cs.statsMsgQueue: @@ -2464,12 +2588,16 @@ func TestStateOutputVoteStats(t *testing.T) { func TestSignSameVoteTwice(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, vss := makeState(config, 2) + _, vss, err := makeState(ctx, config, 
log.TestingLogger(), 2) + require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) - vote := signVote(vss[1], + vote := signVote(ctx, + vss[1], tmproto.PrecommitType, config.ChainID(), types.BlockID{ @@ -2478,7 +2606,9 @@ func TestSignSameVoteTwice(t *testing.T) { }, ) - vote2 := signVote(vss[1], + vote2 := signVote( + ctx, + vss[1], tmproto.PrecommitType, config.ChainID(), @@ -2492,19 +2622,33 @@ func TestSignSameVoteTwice(t *testing.T) { } -// subscribe subscribes test client to the given query and returns a channel with cap = 1. -func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) +// subscribe subscribes a test client to the given query and returns an unbuffered channel to which the subscription's messages are forwarded. +func subscribe( + ctx context.Context, + t *testing.T, + eventBus *eventbus.EventBus, + q tmpubsub.Query, +) <-chan tmpubsub.Message { + t.Helper() + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: q, + }) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + t.Fatalf("Failed to subscribe %q to %v: %v", testSubscriber, q, err) } - return sub.Out() -} - -// subscribe subscribes test client to the given query and returns a channel with cap = 0. -func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return sub.Out() + ch := make(chan tmpubsub.Message) + go func() { + for { + next, err := sub.Next(ctx) + if err != nil { + if ctx.Err() != nil { + return + } + t.Errorf("Subscription for %v unexpectedly terminated: %v", q, err) + return + } + ch <- next + } + }() + return ch } diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index fb3571ac8..e8583932d 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "time" "github.com/tendermint/tendermint/libs/log" @@ -15,12 +16,10 @@ var ( // conditional on the height/round/step in the timeoutInfo. // The timeoutInfo.Duration may be non-positive. type TimeoutTicker interface { - Start() error + Start(context.Context) error Stop() error Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer - - SetLogger(log.Logger) } // timeoutTicker wraps time.Timer, @@ -37,20 +36,19 @@ type timeoutTicker struct { } // NewTimeoutTicker returns a new TimeoutTicker. -func NewTimeoutTicker() TimeoutTicker { +func NewTimeoutTicker(logger log.Logger) TimeoutTicker { tt := &timeoutTicker{ timer: time.NewTimer(0), tickChan: make(chan timeoutInfo, tickTockBufferSize), tockChan: make(chan timeoutInfo, tickTockBufferSize), } - tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) + tt.BaseService = *service.NewBaseService(logger, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout return tt } // OnStart implements service.Service. It starts the timeout routine.
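The rewritten subscribe helper above replaces the event bus's buffered Out() channel with an explicit goroutine that pumps sub.Next(ctx) into a plain channel, so subscriptions now shut down with the test context instead of leaking. A hypothetical consumer, assuming an ensureTimeout constant like the one the ensure* helpers rely on:

    // Illustrative only: how a test would drain the pumped channel.
    func waitForNewRound(t *testing.T, newRoundCh <-chan tmpubsub.Message) {
        t.Helper()
        select {
        case msg := <-newRoundCh:
            _ = msg // a tmpubsub.Message wrapping the NewRound event
        case <-time.After(ensureTimeout):
            t.Fatal("timed out waiting for NewRound")
        }
    }

The ticker hunk that follows picks up the same context plumbing on the service side: OnStart now receives the caller's context rather than managing its own lifecycle.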
-func (t *timeoutTicker) OnStart() error { - +func (t *timeoutTicker) OnStart(gctx context.Context) error { go t.timeoutRoutine() return nil diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index 49dc4f46b..6f6211a13 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -15,33 +15,40 @@ import ( "github.com/tendermint/tendermint/types" ) -var config *cfg.Config // NOTE: must be reset for each _test.go file +var cfg *config.Config // NOTE: must be reset for each _test.go file func TestMain(m *testing.M) { - config = cfg.ResetTestRoot("consensus_height_vote_set_test") + var err error + cfg, err = config.ResetTestRoot("consensus_height_vote_set_test") + if err != nil { + panic(err) + } code := m.Run() - os.RemoveAll(config.RootDir) + os.RemoveAll(cfg.RootDir) os.Exit(code) } func TestPeerCatchupRounds(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + valSet, privVals := factory.ValidatorSet(10, 1) - hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) + hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet) - vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) + vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals) + vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1") if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -57,9 +64,15 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote { +func makeVoteHR( + ctx context.Context, + t *testing.T, + height int64, + valIndex, round int32, + privVals []types.PrivValidator, +) *types.Vote { privVal := privVals[valIndex] - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) if err != nil { panic(err) } @@ -75,10 +88,10 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty Type: tmproto.PrecommitType, BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } - chainID := config.ChainID() + chainID := cfg.ChainID() v := vote.ToProto() - err = privVal.SignVote(context.Background(), chainID, v) + err = privVal.SignVote(ctx, chainID, v) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index 0d9efb839..13f29a202 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/binary" "errors" "fmt" @@ -63,7 +64,7 @@ type WAL interface { SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found 
bool, err error) // service methods - Start() error + Start(context.Context) error Stop() error Wait() } @@ -88,7 +89,7 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped. -func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { +func NewWAL(logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { err := tmos.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) @@ -103,7 +104,7 @@ func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) enc: NewWALEncoder(group), flushInterval: walDefaultFlushInterval, } - wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal) + wal.BaseService = *service.NewBaseService(logger, "baseWAL", wal) return wal, nil } @@ -116,12 +117,7 @@ func (wal *BaseWAL) Group() *auto.Group { return wal.group } -func (wal *BaseWAL) SetLogger(l log.Logger) { - wal.BaseService.Logger = l - wal.group.SetLogger(l) -} - -func (wal *BaseWAL) OnStart() error { +func (wal *BaseWAL) OnStart(ctx context.Context) error { size, err := wal.group.Head.Size() if err != nil { return err @@ -130,7 +126,7 @@ func (wal *BaseWAL) OnStart() error { return err } } - err = wal.group.Start() + err = wal.group.Start(ctx) if err != nil { return err } @@ -164,10 +160,14 @@ func (wal *BaseWAL) FlushAndSync() error { func (wal *BaseWAL) OnStop() { wal.flushTicker.Stop() if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("error on flush data to disk", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.Logger.Error("error on flush data to disk", "error", err) + } } if err := wal.group.Stop(); err != nil { - wal.Logger.Error("error trying to stop wal", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.Logger.Error("error trying to stop wal", "error", err) + } } wal.group.Close() } @@ -428,6 +428,6 @@ func (nilWAL) FlushAndSync() error { return nil } func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (nilWAL) Start() error { return nil } -func (nilWAL) Stop() error { return nil } -func (nilWAL) Wait() {} +func (nilWAL) Start(context.Context) error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff --git a/internal/consensus/wal_fuzz.go b/internal/consensus/wal_fuzz.go index e15097c30..06d894a81 100644 --- a/internal/consensus/wal_fuzz.go +++ b/internal/consensus/wal_fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package consensus diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 81c2125ca..35a539d64 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -3,6 +3,7 @@ package consensus import ( "bufio" "bytes" + "context" "fmt" "io" mrand "math/rand" @@ -11,15 +12,17 @@ import ( "time" "github.com/stretchr/testify/require" - db "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/proxy" + sm 
"github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -28,10 +31,10 @@ import ( // persistent kvstore application and special consensus wal instance // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. -func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { - config := getConfig(t) +func WALGenerateNBlocks(ctx context.Context, t *testing.T, wr io.Writer, numBlocks int) (err error) { + cfg := getConfig(t) - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) + app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) t.Cleanup(func() { require.NoError(t, app.Close()) }) logger := log.TestingLogger().With("wal_generator", "wal_generator") @@ -40,17 +43,17 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // NOTE: we can't import node package because of circular dependency. // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. - privValidatorKeyFile := config.PrivValidator.KeyFile() - privValidatorStateFile := config.PrivValidator.StateFile() + privValidatorKeyFile := cfg.PrivValidator.KeyFile() + privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { return err } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) } - blockStoreDB := db.NewMemDB() + blockStoreDB := dbm.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) @@ -64,32 +67,20 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { return fmt.Errorf("failed to start event bus: %w", err) } - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) - consensusState.SetLogger(logger) + consensusState := NewState(logger, cfg.Consensus, 
state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) if privValidator != nil && privValidator != (*privval.FilePV)(nil) { consensusState.SetPrivValidator(privValidator) @@ -106,7 +97,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { consensusState.wal = wal - if err := consensusState.Start(); err != nil { + if err := consensusState.Start(ctx); err != nil { return fmt.Errorf("failed to start consensus state: %w", err) } @@ -125,11 +116,11 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } // WALWithNBlocks returns a WAL content with numBlocks. -func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { +func WALWithNBlocks(ctx context.Context, t *testing.T, numBlocks int) (data []byte, err error) { var b bytes.Buffer wr := bufio.NewWriter(&b) - if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil { + if err := WALGenerateNBlocks(ctx, t, wr, numBlocks); err != nil { return []byte{}, err } @@ -144,22 +135,23 @@ func randPort() int { return base + mrand.Intn(spread) } -func makeAddrs() (string, string, string) { +// makeAddrs constructs local TCP addresses for node services. +// It uses consecutive ports from a random starting point, so that concurrent +// instances are less likely to collide. +func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" start := randPort() - return fmt.Sprintf("tcp://127.0.0.1:%d", start), - fmt.Sprintf("tcp://127.0.0.1:%d", start+1), - fmt.Sprintf("tcp://127.0.0.1:%d", start+2) + return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1) } // getConfig returns a config for test cases -func getConfig(t *testing.T) *cfg.Config { - c := cfg.ResetTestRoot(t.Name()) +func getConfig(t *testing.T) *config.Config { + c, err := config.ResetTestRoot(t.Name()) + require.NoError(t, err) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr return c } @@ -227,6 +219,6 @@ func (w *byteBufferWAL) SearchForEndHeight( return nil, false, nil } -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} +func (w *byteBufferWAL) Start(context.Context) error { return nil } +func (w *byteBufferWAL) Stop() error { return nil } +func (w *byteBufferWAL) Wait() {} diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index 180af5f34..c0290fcf8 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -2,10 +2,9 @@ package consensus import ( "bytes" - "crypto/rand" + "context" "path/filepath" - // "sync" "testing" "time" @@ -20,39 +19,33 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -const ( - walTestFlushInterval = time.Duration(100) * time.Millisecond -) +const walTestFlushInterval = 100 * time.Millisecond func TestWALTruncate(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // this magic number 4K can truncate the content when RotateFile. // defaultHeadSizeLimit(10M) is hard to simulate. // this magic number 1 * time.Millisecond make RotateFile check frequently. // defaultGroupCheckDuration(5s) is hard to simulate. 
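
// NewWAL now takes the logger as its first argument, and the WAL is started
// with a context rather than SetLogger followed by Start(). A sketch of the
// new construction pattern, matching the call below:
//
//	wal, err := NewWAL(log.TestingLogger(), walFile,
//		autofile.GroupHeadSizeLimit(4096),             // rotate small heads
//		autofile.GroupCheckDuration(time.Millisecond), // check rotation often
//	)
//	if err != nil {
//		return err
//	}
//	if err := wal.Start(ctx); err != nil { // stops when ctx is canceled
//		return err
//	}
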
- wal, err := NewWAL(walFile, + wal, err := NewWAL(logger, walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond), ) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. at this // time, RotateFile is called, truncate content exist in each file. - err = WALGenerateNBlocks(t, wal.Group(), 60) + err = WALGenerateNBlocks(ctx, t, wal.Group(), 60) require.NoError(t, err) time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run @@ -107,18 +100,14 @@ func TestWALWrite(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wal, err := NewWAL(log.TestingLogger(), walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -144,15 +133,17 @@ func TestWALWrite(t *testing.T) { } func TestWALSearchForEndHeight(t *testing.T) { - walBody, err := WALWithNBlocks(t, 6) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + walBody, err := WALWithNBlocks(ctx, t, 6) if err != nil { t.Fatal(err) } walFile := tempWALWithData(walBody) - wal, err := NewWAL(walFile) + wal, err := NewWAL(log.TestingLogger(), walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) @@ -172,21 +163,23 @@ func TestWALSearchForEndHeight(t *testing.T) { func TestWALPeriodicSync(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + wal, err := NewWAL(log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) - wal.SetLogger(log.TestingLogger()) // Generate some data - err = WALGenerateNBlocks(t, wal.Group(), 5) + err = WALGenerateNBlocks(ctx, t, wal.Group(), 5) require.NoError(t, err) // We should have data in the buffer now assert.NotZero(t, wal.Group().Buffered()) - require.NoError(t, wal.Start()) + require.NoError(t, wal.Start(ctx)) t.Cleanup(func() { if err := wal.Stop(); err != nil { t.Error(err) @@ -208,69 +201,3 @@ func TestWALPeriodicSync(t *testing.T) { gr.Close() } } - -/* -var initOnce sync.Once - -func registerInterfacesOnce() { - initOnce.Do(func() { - var _ = wire.RegisterInterface( - struct{ WALMessage }{}, - wire.ConcreteType{[]byte{}, 0x10}, - ) - }) -} -*/ - -func nBytes(n int) []byte { - buf := make([]byte, n) - n, _ = rand.Read(buf) - return buf[:n] -} - -func benchmarkWalDecode(b *testing.B, n int) { - // registerInterfacesOnce() - buf := new(bytes.Buffer) - enc := NewWALEncoder(buf) - - data := nBytes(n) - if err 
:= enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { - b.Error(err) - } - - encoded := buf.Bytes() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(encoded) - dec := NewWALDecoder(buf) - if _, err := dec.Decode(); err != nil { - b.Fatal(err) - } - } - b.ReportAllocs() -} - -func BenchmarkWalDecode512B(b *testing.B) { - benchmarkWalDecode(b, 512) -} - -func BenchmarkWalDecode10KB(b *testing.B) { - benchmarkWalDecode(b, 10*1024) -} -func BenchmarkWalDecode100KB(b *testing.B) { - benchmarkWalDecode(b, 100*1024) -} -func BenchmarkWalDecode1MB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024) -} -func BenchmarkWalDecode10MB(b *testing.B) { - benchmarkWalDecode(b, 10*1024*1024) -} -func BenchmarkWalDecode100MB(b *testing.B) { - benchmarkWalDecode(b, 100*1024*1024) -} -func BenchmarkWalDecode1GB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024*1024) -} diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go new file mode 100644 index 000000000..f13582af7 --- /dev/null +++ b/internal/eventbus/event_bus.go @@ -0,0 +1,240 @@ +package eventbus + +import ( + "context" + "errors" + "fmt" + "strings" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +// Subscription is a proxy interface for a pubsub Subscription. +type Subscription interface { + ID() string + Next(context.Context) (tmpubsub.Message, error) +} + +// EventBus is a common bus for all events going through the system. +// It is a type-aware wrapper around an underlying pubsub server. +// All events should be published via the bus. +type EventBus struct { + service.BaseService + pubsub *tmpubsub.Server +} + +// NewDefault returns a new event bus with default options. +func NewDefault(l log.Logger) *EventBus { + logger := l.With("module", "eventbus") + pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(0), + func(s *tmpubsub.Server) { + s.Logger = logger + }) + b := &EventBus{pubsub: pubsub} + b.BaseService = *service.NewBaseService(logger, "EventBus", b) + return b +} + +func (b *EventBus) OnStart(ctx context.Context) error { + return b.pubsub.Start(ctx) +} + +func (b *EventBus) OnStop() { + if err := b.pubsub.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) + } + } +} + +func (b *EventBus) NumClients() int { + return b.pubsub.NumClients() +} + +func (b *EventBus) NumClientSubscriptions(clientID string) int { + return b.pubsub.NumClientSubscriptions(clientID) +} + +// Deprecated: Use SubscribeWithArgs instead. +func (b *EventBus) Subscribe(ctx context.Context, + clientID string, query tmpubsub.Query, capacities ...int) (Subscription, error) { + + return b.pubsub.Subscribe(ctx, clientID, query, capacities...) 
+} + +func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) { + return b.pubsub.SubscribeWithArgs(ctx, args) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { + return b.pubsub.Unsubscribe(ctx, args) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Observe(ctx context.Context, observe func(tmpubsub.Message) error, queries ...tmpubsub.Query) error { + return b.pubsub.Observe(ctx, observe, queries...) +} + +func (b *EventBus) Publish(eventValue string, eventData types.TMEventData) error { + // no explicit deadline for publishing events + ctx := context.Background() + + tokens := strings.Split(types.EventTypeKey, ".") + event := abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: eventValue, + }, + }, + } + + return b.pubsub.PublishWithEvents(ctx, eventData, []abci.Event{event}) +} + +func (b *EventBus) PublishEventNewBlock(data types.EventDataNewBlock) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + + // add Tendermint-reserved new block event + events = append(events, types.EventNewBlock) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewBlockHeader(data types.EventDataNewBlockHeader) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + + // add Tendermint-reserved new block header event + events = append(events, types.EventNewBlockHeader) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewEvidence(evidence types.EventDataNewEvidence) error { + return b.Publish(types.EventNewEvidenceValue, evidence) +} + +func (b *EventBus) PublishEventVote(data types.EventDataVote) error { + return b.Publish(types.EventVoteValue, data) +} + +func (b *EventBus) PublishEventValidBlock(data types.EventDataRoundState) error { + return b.Publish(types.EventValidBlockValue, data) +} + +func (b *EventBus) PublishEventBlockSyncStatus(data types.EventDataBlockSyncStatus) error { + return b.Publish(types.EventBlockSyncStatusValue, data) +} + +func (b *EventBus) PublishEventStateSyncStatus(data types.EventDataStateSyncStatus) error { + return b.Publish(types.EventStateSyncStatusValue, data) +} + +// PublishEventTx publishes tx event with events from Result. Note it will add +// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys +// will be overwritten. 
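
// Because tx.hash and tx.height are appended below as reserved composite
// keys, subscribers can filter on them directly. A sketch of the consuming
// side, mirroring the usage in this patch's event bus tests (names are
// illustrative):
//
//	sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
//		ClientID: "test",
//		Query:    tmquery.MustParse(fmt.Sprintf("tm.event='Tx' AND tx.hash='%X'", tx.Hash())),
//	})
//	if err != nil {
//		return err
//	}
//	msg, err := sub.Next(ctx) // blocks until a matching Tx event arrives
//	if err != nil {
//		return err
//	}
//	edt := msg.Data().(types.EventDataTx)
//	_ = edt
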
+func (b *EventBus) PublishEventTx(data types.EventDataTx) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := data.Result.Events + + // add Tendermint-reserved events + events = append(events, types.EventTx) + + tokens := strings.Split(types.TxHashKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%X", types.Tx(data.Tx).Hash()), + }, + }, + }) + + tokens = strings.Split(types.TxHeightKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%d", data.Height), + }, + }, + }) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewRoundStep(data types.EventDataRoundState) error { + return b.Publish(types.EventNewRoundStepValue, data) +} + +func (b *EventBus) PublishEventTimeoutPropose(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutProposeValue, data) +} + +func (b *EventBus) PublishEventTimeoutWait(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutWaitValue, data) +} + +func (b *EventBus) PublishEventNewRound(data types.EventDataNewRound) error { + return b.Publish(types.EventNewRoundValue, data) +} + +func (b *EventBus) PublishEventCompleteProposal(data types.EventDataCompleteProposal) error { + return b.Publish(types.EventCompleteProposalValue, data) +} + +func (b *EventBus) PublishEventPolka(data types.EventDataRoundState) error { + return b.Publish(types.EventPolkaValue, data) +} + +func (b *EventBus) PublishEventRelock(data types.EventDataRoundState) error { + return b.Publish(types.EventRelockValue, data) +} + +func (b *EventBus) PublishEventLock(data types.EventDataRoundState) error { + return b.Publish(types.EventLockValue, data) +} + +func (b *EventBus) PublishEventValidatorSetUpdates(data types.EventDataValidatorSetUpdates) error { + return b.Publish(types.EventValidatorSetUpdatesValue, data) +} + +//----------------------------------------------------------------------------- + +// NopEventBus implements a types.BlockEventPublisher that discards all events. 
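
// NopEventBus is handy when a component requires the
// types.BlockEventPublisher surface but the events themselves are
// irrelevant, e.g. in tests or offline tooling (illustrative usage, not code
// from this patch):
//
//	var pub types.BlockEventPublisher = eventbus.NopEventBus{}
//	_ = pub.PublishEventNewBlock(types.EventDataNewBlock{}) // always returns nil
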
+type NopEventBus struct{} + +func (NopEventBus) PublishEventNewBlock(types.EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(types.EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventNewEvidence(types.EventDataNewEvidence) error { + return nil +} + +func (NopEventBus) PublishEventTx(types.EventDataTx) error { + return nil +} + +func (NopEventBus) PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdates) error { + return nil +} diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go new file mode 100644 index 000000000..f940659ba --- /dev/null +++ b/internal/eventbus/event_bus_test.go @@ -0,0 +1,520 @@ +package eventbus_test + +import ( + "context" + "fmt" + mrand "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" +) + +func TestEventBusPublishEventTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + + // PublishEventTx adds 3 composite keys, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) + txsSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := txsSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.EqualValues(t, tx, edt.Tx) + assert.Equal(t, result, edt.Result) + }() + + err = eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} + +func TestEventBusPublishEventNewBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + }, + } + + // PublishEventNewBlock adds the tm.event compositeKey, so the query below should work + query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" + blocksSub, err := 
eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := blocksSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlock) + assert.Equal(t, block, edt.Block) + assert.Equal(t, blockID, edt.BlockID) + assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) + assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + }() + + err = eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + Block: block, + BlockID: blockID, + ResultBeginBlock: resultBeginBlock, + ResultEndBlock: resultEndBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block after 1 sec.") + } +} + +func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "recipient", Value: "bar"}, + {Key: "amount", Value: "5"}, + }, + }, + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "baz"}, + {Key: "recipient", Value: "cat"}, + {Key: "amount", Value: "13"}, + }, + }, + { + Type: "withdraw.rewards", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "bar"}, + {Key: "source", Value: "iceman"}, + {Key: "amount", Value: "33"}, + }, + }, + }, + } + + testCases := []struct { + query string + expectResults bool + }{ + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'", + false, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", + false, + }, + } + + for i, tc := range testCases { + var name string + + if tc.expectResults { + name = fmt.Sprintf("ExpetedResultsCase%d", i) + } else { + name = fmt.Sprintf("NoResultsCase%d", i) + } + + t.Run(name, func(t *testing.T) { + + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: tmquery.MustParse(tc.query), + }) + require.NoError(t, err) + + gotResult := make(chan bool, 1) + go func() { + defer close(gotResult) + tctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := sub.Next(tctx) + if err == nil { + data := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.EqualValues(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + gotResult <- true + } + }() + + assert.NoError(t, eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + })) + + require.NoError(t, ctx.Err(), "context should not have been canceled") + + if got := <-gotResult; got != tc.expectResults { + require.Failf(t, "Wrong transaction result", + "got a tx: %v, wanted a tx: %v", got, tc.expectResults) + } + }) + + } +} + +func TestEventBusPublishEventNewBlockHeader(t 
*testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + }, + } + + // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work + query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" + headersSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := headersSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlockHeader) + assert.Equal(t, block.Header, edt.Header) + assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) + assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + }() + + err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + Header: block.Header, + ResultBeginBlock: resultBeginBlock, + ResultEndBlock: resultEndBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block header after 1 sec.") + } +} + +func TestEventBusPublishEventNewEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + ev := types.NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") + + const query = `tm.event='NewEvidence'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := evSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewEvidence) + assert.Equal(t, ev, edt.Evidence) + assert.Equal(t, int64(4), edt.Height) + }() + + err = eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + Evidence: ev, + Height: 4, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block header after 1 sec.") + } +} + +func TestEventBusPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + const numEventsExpected = 14 + + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.Empty{}, + Limit: numEventsExpected, + }) + require.NoError(t, err) + + count := make(chan int, 1) + go func() { + defer close(count) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + for n := 0; ; n++ { + if _, err := sub.Next(ctx); err != nil { + count <- n + return + } + } + }() + + require.NoError(t, eventBus.Publish(types.EventNewBlockHeaderValue, + types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventNewBlock(types.EventDataNewBlock{})) + require.NoError(t, 
eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventVote(types.EventDataVote{})) + require.NoError(t, eventBus.PublishEventNewRoundStep(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutPropose(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutWait(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventNewRound(types.EventDataNewRound{})) + require.NoError(t, eventBus.PublishEventCompleteProposal(types.EventDataCompleteProposal{})) + require.NoError(t, eventBus.PublishEventPolka(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventRelock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventLock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdates{})) + require.NoError(t, eventBus.PublishEventBlockSyncStatus(types.EventDataBlockSyncStatus{})) + require.NoError(t, eventBus.PublishEventStateSyncStatus(types.EventDataStateSyncStatus{})) + + require.GreaterOrEqual(t, <-count, numEventsExpected) +} + +func BenchmarkEventBus(b *testing.B) { + benchmarks := []struct { + name string + numClients int + randQueries bool + randEvents bool + }{ + {"10Clients1Query1Event", 10, false, false}, + {"100Clients", 100, false, false}, + {"1000Clients", 1000, false, false}, + + {"10ClientsRandQueries1Event", 10, true, false}, + {"100Clients", 100, true, false}, + {"1000Clients", 1000, true, false}, + + {"10ClientsRandQueriesRandEvents", 10, true, true}, + {"100Clients", 100, true, true}, + {"1000Clients", 1000, true, true}, + + {"10Clients1QueryRandEvents", 10, false, true}, + {"100Clients", 100, false, true}, + {"1000Clients", 1000, false, true}, + } + + for _, bm := range benchmarks { + bm := bm + b.Run(bm.name, func(b *testing.B) { + benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + }) + } +} + +func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { + // for random* functions + mrand.Seed(time.Now().Unix()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) // set buffer capacity to 0 so we are not testing cache + err := eventBus.Start(ctx) + if err != nil { + b.Error(err) + } + b.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + b.Error(err) + } + }) + + q := types.EventQueryNewBlock + + for i := 0; i < numClients; i++ { + if randQueries { + q = randQuery() + } + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: q, + }) + if err != nil { + b.Fatal(err) + } + go func() { + for { + if _, err := sub.Next(ctx); err != nil { + return + } + } + }() + } + + eventValue := types.EventNewBlockValue + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if randEvents { + eventValue = randEventValue() + } + + err := eventBus.Publish(eventValue, types.EventDataString("Gamora")) + if err != nil { + b.Error(err) + } + } +} + +var events = []string{ + types.EventNewBlockValue, + types.EventNewBlockHeaderValue, + types.EventNewRoundValue, + types.EventNewRoundStepValue, + types.EventTimeoutProposeValue, + types.EventCompleteProposalValue, + types.EventPolkaValue, + types.EventLockValue, + types.EventRelockValue, + types.EventTimeoutWaitValue, + types.EventVoteValue, + types.EventBlockSyncStatusValue, + types.EventStateSyncStatusValue, +} + +func 
randEventValue() string { + return events[mrand.Intn(len(events))] +} + +var queries = []tmpubsub.Query{ + types.EventQueryNewBlock, + types.EventQueryNewBlockHeader, + types.EventQueryNewRound, + types.EventQueryNewRoundStep, + types.EventQueryTimeoutPropose, + types.EventQueryCompleteProposal, + types.EventQueryPolka, + types.EventQueryLock, + types.EventQueryRelock, + types.EventQueryTimeoutWait, + types.EventQueryVote, + types.EventQueryBlockSyncStatus, + types.EventQueryStateSyncStatus, +} + +func randQuery() tmpubsub.Query { + return queries[mrand.Intn(len(queries))] +} diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 2a48fe032..f342dec4c 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -14,9 +14,9 @@ import ( dbm "github.com/tendermint/tm-db" clist "github.com/tendermint/tendermint/internal/libs/clist" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 3f24366b2..57d6e3227 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -13,12 +13,12 @@ import ( "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index a454038fd..4e37e1d17 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -1,6 +1,7 @@ package evidence import ( + "context" "fmt" "runtime/debug" "sync" @@ -15,29 +16,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - EvidenceChannel: { - MsgType: new(tmproto.EvidenceList), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(EvidenceChannel), - Priority: 6, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 400, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( EvidenceChannel = p2p.ChannelID(0x38) @@ -51,6 +30,18 @@ const ( broadcastEvidenceIntervalS = 10 ) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. 
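
// GetChannelDescriptor replaces the ChannelShims table deleted above:
// instead of a shared map of legacy descriptors, each reactor package
// exposes its own descriptor for the node to wire into the router. A sketch
// of the consuming side (the router's OpenChannel signature is assumed here,
// not taken from this patch):
//
//	chDesc := evidence.GetChannelDescriptor()
//	evidenceCh, err := router.OpenChannel(chDesc)
//	if err != nil {
//		return err
//	}
//	_ = evidenceCh
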
+func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: EvidenceChannel, + MessageType: new(tmproto.EvidenceList), + Priority: 6, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 32, + } +} + // Reactor handles evpool evidence broadcasting amongst peers. type Reactor struct { service.BaseService @@ -91,7 +82,7 @@ func NewReactor( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. No error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { go r.processEvidenceCh() go r.processPeerUpdates() diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 1cf995731..764450cd6 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -1,6 +1,7 @@ package evidence_test import ( + "context" "encoding/hex" "math/rand" "sync" @@ -20,9 +21,9 @@ import ( "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -44,7 +45,7 @@ type reactorTestSuite struct { numStateStores int } -func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { +func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { t.Helper() pID := make([]byte, 16) @@ -55,18 +56,15 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts := &reactorTestSuite{ numStateStores: numStateStores, logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}), reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), pools: make(map[types.NodeID]*evidence.Pool, numStateStores), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)} - rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, - chDesc, - new(tmproto.EvidenceList), - int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.EvidenceList)} + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) idx := 0 @@ -96,7 +94,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts.peerUpdates[nodeID], rts.pools[nodeID]) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) idx++ @@ -236,13 +234,16 @@ func createEvidenceList( } func TestReactorMultiDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val := types.NewMockPV() height := int64(numEvidence) + 10 stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - rts := setup(t, 
[]sm.Store{stateDB1, stateDB2}, 20) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 20) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -284,7 +285,10 @@ func TestReactorBroadcastEvidence(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - rts := setup(t, stateDBs, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, stateDBs, 0) rts.start(t) // Create a series of fixtures where each suite contains a reactor and @@ -338,7 +342,10 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height1) stateDB2 := initializeValidatorState(t, val, height2) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) rts.start(t) primary := rts.nodes[0] @@ -371,7 +378,10 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -397,11 +407,8 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_Committed(t *testing.T) { @@ -411,7 +418,10 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -441,11 +451,6 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { // start the network and ensure it's configured rts.start(t) - // without the following sleep the test consistently fails; - // likely because the sleep forces a context switch that lets - // the router process other operations. - time.Sleep(2 * time.Millisecond) - // The secondary reactor should have received all the evidence ignoring the // already committed evidence. 
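
// (The 2ms sleep formerly needed at this point was removed above:
// waitForEvidence blocks until the secondary pool holds the expected
// evidence, and the scheduling workaround is presumably obsolete under the
// context-driven reactor startup introduced in this patch.)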
rts.waitForEvidence(t, evList[numEvidence/2:], secondary.NodeID) @@ -453,11 +458,8 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { @@ -474,7 +476,10 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - rts := setup(t, stateDBs, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, stateDBs, 0) rts.start(t) evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index e579693e1..e5a660287 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -8,18 +8,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/inspect/doc.go b/internal/inspect/doc.go similarity index 100% rename from inspect/doc.go rename to internal/inspect/doc.go diff --git a/inspect/inspect.go b/internal/inspect/inspect.go similarity index 76% rename from inspect/inspect.go rename to internal/inspect/inspect.go index 38bc9ed5d..3bc744e4c 100644 --- a/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -7,14 +7,15 @@ import ( "net" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect/rpc" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/inspect/rpc" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmstrings "github.com/tendermint/tendermint/libs/strings" - rpccore "github.com/tendermint/tendermint/rpc/core" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "golang.org/x/sync/errgroup" @@ -32,7 +33,7 @@ type Inspector struct { config *config.RPCConfig indexerService *indexer.Service - eventBus *types.EventBus + eventBus *eventbus.EventBus logger log.Logger } @@ -43,22 +44,23 @@ type Inspector struct { 
/// //nolint:lll func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { - routes := rpc.Routes(*cfg, ss, bs, es, logger) - eb := types.NewEventBus() - eb.SetLogger(logger.With("module", "events")) - is := indexer.NewIndexerService(es, eb) - is.SetLogger(logger.With("module", "txindex")) + eb := eventbus.NewDefault(logger.With("module", "events")) + return &Inspector{ - routes: routes, - config: cfg, - logger: logger, - eventBus: eb, - indexerService: is, + routes: rpc.Routes(*cfg, ss, bs, es, logger), + config: cfg, + logger: logger, + eventBus: eb, + indexerService: indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eb, + Logger: logger.With("module", "txindex"), + }), } } // NewFromConfig constructs an Inspector using the values defined in the passed in config. -func NewFromConfig(cfg *config.Config) (*Inspector, error) { +func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg}) if err != nil { return nil, err @@ -76,7 +78,6 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) ss := state.NewStore(sDB) return New(cfg.RPC, bs, ss, sinks, logger), nil } @@ -84,26 +85,18 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { // Run starts the Inspector servers and blocks until the servers shut down. The passed // in context is used to control the lifecycle of the servers. func (ins *Inspector) Run(ctx context.Context) error { - err := ins.eventBus.Start() + err := ins.eventBus.Start(ctx) if err != nil { return fmt.Errorf("error starting event bus: %s", err) } - defer func() { - err := ins.eventBus.Stop() - if err != nil { - ins.logger.Error("event bus stopped with error", "err", err) - } - }() - err = ins.indexerService.Start() + defer ins.eventBus.Wait() + + err = ins.indexerService.Start(ctx) if err != nil { return fmt.Errorf("error starting indexer service: %s", err) } - defer func() { - err := ins.indexerService.Stop() - if err != nil { - ins.logger.Error("indexer service stopped with error", "err", err) - } - }() + defer ins.indexerService.Wait() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) } diff --git a/inspect/inspect_test.go b/internal/inspect/inspect_test.go similarity index 90% rename from inspect/inspect_test.go rename to internal/inspect/inspect_test.go index c2a1df571..15a555ab0 100644 --- a/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -15,23 +15,26 @@ import ( "github.com/stretchr/testify/require" abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/state/indexer" + indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" + statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" - "github.com/tendermint/tendermint/state/indexer" - indexermocks "github.com/tendermint/tendermint/state/indexer/mocks" - statemocks "github.com/tendermint/tendermint/state/mocks" 
"github.com/tendermint/tendermint/types" ) func TestInspectConstructor(t *testing.T) { - cfg := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot("test") + require.NoError(t, err) + testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { - d, err := inspect.NewFromConfig(cfg) + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) require.NoError(t, err) require.NotNil(t, d) }) @@ -39,11 +42,15 @@ func TestInspectConstructor(t *testing.T) { } func TestInspectRun(t *testing.T) { - cfg := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot("test") + require.NoError(t, err) + + testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { - d, err := inspect.NewFromConfig(cfg) + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) stoppedWG := &sync.WaitGroup{} @@ -72,6 +79,7 @@ func TestBlock(t *testing.T) { blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) rpcConfig := config.TestRPCConfig() l := log.TestingLogger() @@ -93,7 +101,7 @@ func TestBlock(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - resultBlock, err := cli.Block(context.Background(), &testHeight) + resultBlock, err := cli.Block(ctx, &testHeight) require.NoError(t, err) require.Equal(t, testBlock.Height, resultBlock.Block.Height) require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) @@ -145,7 +153,7 @@ func TestTxSearch(t *testing.T) { require.NoError(t, err) var page = 1 - resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + resultTxSearch, err := cli.TxSearch(ctx, testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) @@ -191,7 +199,7 @@ func TestTx(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Tx(context.Background(), testHash, false) + res, err := cli.Tx(ctx, testHash, false) require.NoError(t, err) require.Equal(t, types.Tx(testTx), res.Tx) @@ -216,6 +224,8 @@ func TestConsensusParams(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -237,7 +247,7 @@ func TestConsensusParams(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - params, err := cli.ConsensusParams(context.Background(), &testHeight) + params, err := cli.ConsensusParams(ctx, &testHeight) require.NoError(t, err) require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) @@ -267,6 +277,8 @@ func TestBlockResults(t *testing.T) { blockStoreMock.On("Height").Return(testHeight) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + 
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -288,7 +300,7 @@ func TestBlockResults(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockResults(context.Background(), &testHeight) + res, err := cli.BlockResults(ctx, &testHeight) require.NoError(t, err) require.Equal(t, res.TotalGasUsed, testGasUsed) @@ -313,6 +325,8 @@ func TestCommit(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -334,7 +348,7 @@ func TestCommit(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Commit(context.Background(), &testHeight) + res, err := cli.Commit(ctx, &testHeight) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, res.SignedHeader.Commit.Round, testRound) @@ -365,6 +379,8 @@ func TestBlockByHash(t *testing.T) { blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -386,7 +402,7 @@ func TestBlockByHash(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockByHash(context.Background(), testHash) + res, err := cli.BlockByHash(ctx, testHash) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, []byte(res.BlockID.Hash), testHash) @@ -416,6 +432,8 @@ func TestBlockchain(t *testing.T) { }) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -437,7 +455,7 @@ func TestBlockchain(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockchainInfo(context.Background(), 0, 100) + res, err := cli.BlockchainInfo(ctx, 0, 100) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) @@ -467,6 +485,8 @@ func TestValidators(t *testing.T) { blockStoreMock.On("Base").Return(int64(0)) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -491,7 +511,7 @@ func TestValidators(t *testing.T) { testPage := 1 testPerPage := 100 - res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + res, err := 
cli.Validators(ctx, &testHeight, &testPage, &testPerPage) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testVotingPower, res.Validators[0].VotingPower) @@ -551,7 +571,7 @@ func TestBlockSearch(t *testing.T) { testPage := 1 testPerPage := 100 testOrderBy := "desc" - res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + res, err := cli.BlockSearch(ctx, testQuery, &testPage, &testPerPage, testOrderBy) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) diff --git a/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go similarity index 94% rename from inspect/rpc/rpc.go rename to internal/inspect/rpc/rpc.go index 76dcda4eb..276bfe082 100644 --- a/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -9,12 +9,12 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/rpc/core" "github.com/tendermint/tendermint/rpc/jsonrpc/server" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -26,6 +26,10 @@ type Server struct { Config *config.RPCConfig } +type eventBusUnsubscriber interface { + UnsubscribeAll(ctx context.Context, subscriber string) error +} + // Routes returns the set of routes used by the Inspector server. // //nolint: lll @@ -59,7 +63,7 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg mux := http.NewServeMux() wmLogger := logger.With("protocol", "websocket") - var eventBus types.EventBusSubscriber + var eventBus eventBusUnsubscriber websocketDisconnectFn := func(remoteAddr string) { err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) diff --git a/internal/libs/autofile/autofile_test.go b/internal/libs/autofile/autofile_test.go index c2442a56f..479a239cb 100644 --- a/internal/libs/autofile/autofile_test.go +++ b/internal/libs/autofile/autofile_test.go @@ -1,7 +1,6 @@ package autofile import ( - "io/ioutil" "os" "path/filepath" "syscall" @@ -22,7 +21,7 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := ioutil.TempDir("", "sighup_test") + dir, err := os.MkdirTemp("", "sighup_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(dir) @@ -45,7 +44,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory - otherDir, err := ioutil.TempDir("", "sighup_test_other") + otherDir, err := os.MkdirTemp("", "sighup_test_other") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(otherDir) }) require.NoError(t, os.Chdir(otherDir)) @@ -72,7 +71,7 @@ func TestSIGHUP(t *testing.T) { } // The current directory should be empty - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) } @@ -80,7 +79,7 @@ func TestSIGHUP(t *testing.T) { // // Manually modify file permissions, close, and reopen using autofile: // // We expect the file permissions to be changed back to the intended perms. 
// func TestOpenAutoFilePerms(t *testing.T) { -// file, err := ioutil.TempFile("", "permission_test") +// file, err := os.CreateTemp("", "permission_test") // require.NoError(t, err) // err = file.Close() // require.NoError(t, err) @@ -106,7 +105,7 @@ func TestSIGHUP(t *testing.T) { func TestAutoFileSize(t *testing.T) { // First, create an AutoFile writing to a tempfile dir - f, err := ioutil.TempFile("", "sighup_test") + f, err := os.CreateTemp("", "sighup_test") require.NoError(t, err) require.NoError(t, f.Close()) @@ -139,7 +138,7 @@ func TestAutoFileSize(t *testing.T) { } func mustReadFile(t *testing.T, filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) require.NoError(t, err) return fileBytes diff --git a/internal/libs/autofile/cmd/logjack.go b/internal/libs/autofile/cmd/logjack.go index 1aa8b6a11..0f412a366 100644 --- a/internal/libs/autofile/cmd/logjack.go +++ b/internal/libs/autofile/cmd/logjack.go @@ -1,15 +1,17 @@ package main import ( + "context" "flag" "fmt" "io" "os" + "os/signal" "strconv" "strings" + "syscall" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmos "github.com/tendermint/tendermint/libs/os" ) const Version = "0.0.1" @@ -32,21 +34,10 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo return } -type fmtLogger struct{} - -func (fmtLogger) Info(msg string, keyvals ...interface{}) { - strs := make([]string, len(keyvals)) - for i, kv := range keyvals { - strs[i] = fmt.Sprintf("%v", kv) - } - fmt.Printf("%s %s\n", msg, strings.Join(strs, ",")) -} - func main() { - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(fmtLogger{}, func() { - fmt.Println("logjack shutting down") - }) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM) + defer cancel() + defer func() { fmt.Println("logjack shutting down") }() // Read options headPath, chopSize, limitSize, version := parseFlags() @@ -62,7 +53,7 @@ func main() { os.Exit(1) } - if err = group.Start(); err != nil { + if err = group.Start(ctx); err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) os.Exit(1) } diff --git a/internal/libs/autofile/group.go b/internal/libs/autofile/group.go index 23f27c59b..0e208d8e9 100644 --- a/internal/libs/autofile/group.go +++ b/internal/libs/autofile/group.go @@ -2,6 +2,7 @@ package autofile import ( "bufio" + "context" "errors" "fmt" "io" @@ -135,7 +136,7 @@ func GroupTotalSizeLimit(limit int64) func(*Group) { // OnStart implements service.Service by starting the goroutine that checks file // and group limits. 
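[Editor's note] OnStart below now receives the caller's context, matching the logjack rewrite above: cancellation replaces the old TrapSignal/Stop pattern. A hypothetical direct use of Group under the new API; the head path and size limit are illustrative:

package main

import (
	"context"
	"fmt"
	"os"

	auto "github.com/tendermint/tendermint/internal/libs/autofile"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	group, err := auto.OpenGroup("head.log", auto.GroupTotalSizeLimit(1<<20))
	if err != nil {
		fmt.Printf("could not open group: %v\n", err)
		os.Exit(1)
	}
	// Start now takes the context; canceling ctx stops the service.
	if err := group.Start(ctx); err != nil {
		fmt.Printf("could not start group: %v\n", err)
		os.Exit(1)
	}
}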
-func (g *Group) OnStart() error { +func (g *Group) OnStart(ctx context.Context) error { g.ticker = time.NewTicker(g.groupCheckDuration) go g.processTicks() return nil diff --git a/internal/libs/autofile/group_test.go b/internal/libs/autofile/group_test.go index 0981923eb..ffdb70013 100644 --- a/internal/libs/autofile/group_test.go +++ b/internal/libs/autofile/group_test.go @@ -2,7 +2,6 @@ package autofile import ( "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -122,7 +121,7 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := ioutil.TempDir("", "rotate_test") + dir, err := os.MkdirTemp("", "rotate_test") require.NoError(t, err) defer os.RemoveAll(dir) err = os.Chdir(dir) @@ -151,21 +150,21 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) // Read g.Head.Path+"000" - body1, err := ioutil.ReadFile(g.Head.Path + ".000") + body1, err := os.ReadFile(g.Head.Path + ".000") assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path - body2, err := ioutil.ReadFile(g.Head.Path) + body2, err := os.ReadFile(g.Head.Path) assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } // Make sure there are no files in the current, temporary directory - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) diff --git a/internal/libs/flowrate/README.md b/internal/libs/flowrate/README.md deleted file mode 100644 index caed79aa3..000000000 --- a/internal/libs/flowrate/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Data Flow Rate Control -====================== - -To download and install this package run: - -go get github.com/mxk/go-flowrate/flowrate - -The documentation is available at: - - diff --git a/internal/libs/flowrate/io.go b/internal/libs/flowrate/io.go deleted file mode 100644 index fbe090972..000000000 --- a/internal/libs/flowrate/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowrate: flow rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. -type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. 
-func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (r *Reader) SetLimit(new int64) (old int64) { - old, r.limit = r.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Read call on a non-blocking reader returns immediately if no additional bytes -// may be read at this time due to the rate limit. -func (r *Reader) SetBlocking(new bool) (old bool) { - old, r.block = r.block, new - return -} - -// Close closes the underlying reader if it implements the io.Closer interface. -func (r *Reader) Close() error { - defer r.Done() - if c, ok := r.Reader.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Writer implements io.WriteCloser with a restriction on the rate of data -// transfer. -type Writer struct { - io.Writer // Data destination - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be written due to the limit -} - -// NewWriter restricts all Write operations on w to limit bytes per second. The -// transfer rate and the default blocking behavior (true) can be changed -// directly on the returned *Writer. -func NewWriter(w io.Writer, limit int64) *Writer { - return &Writer{w, New(0, 0), limit, true} -} - -// Write writes len(p) bytes from p to the underlying data stream without -// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is -// non-blocking and no additional bytes can be written at this time. -func (w *Writer) Write(p []byte) (n int, err error) { - var c int - for len(p) > 0 && err == nil { - s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { - return n, ErrLimit - } - p = p[c:] - n += c - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (w *Writer) SetLimit(new int64) (old int64) { - old, w.limit = w.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Write call on a non-blocking writer returns as soon as no additional bytes -// may be written at this time due to the rate limit. -func (w *Writer) SetBlocking(new bool) (old bool) { - old, w.block = w.block, new - return -} - -// Close closes the underlying writer if it implements the io.Closer interface. 
-func (w *Writer) Close() error { - defer w.Done() - if c, ok := w.Writer.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/internal/libs/flowrate/io_test.go b/internal/libs/flowrate/io_test.go deleted file mode 100644 index 4d7de417e..000000000 --- a/internal/libs/flowrate/io_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "bytes" - "testing" - "time" -) - -const ( - _50ms = 50 * time.Millisecond - _100ms = 100 * time.Millisecond - _200ms = 200 * time.Millisecond - _300ms = 300 * time.Millisecond - _400ms = 400 * time.Millisecond - _500ms = 500 * time.Millisecond -) - -func nextStatus(m *Monitor) Status { - samples := m.samples - for i := 0; i < 30; i++ { - if s := m.Status(); s.Samples != samples { - return s - } - time.Sleep(5 * time.Millisecond) - } - return m.Status() -} - -func TestReader(t *testing.T) { - in := make([]byte, 100) - for i := range in { - in[i] = byte(i) - } - b := make([]byte, 100) - r := NewReader(bytes.NewReader(in), 100) - start := time.Now() - - // Make sure r implements Limiter - _ = Limiter(r) - - // 1st read of 10 bytes is performed immediately - if n, err := r.Read(b); n != 10 || err != nil { - t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - // No new Reads allowed in the current sample - r.SetBlocking(false) - if n, err := r.Read(b); n != 0 || err != nil { - t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - status := [6]Status{0: r.Status()} // No samples in the first status - - // 2nd read of 10 bytes blocks until the next sample - r.SetBlocking(true) - if n, err := r.Read(b[10:]); n != 10 || err != nil { - t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _100ms { - t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) - } - - status[1] = r.Status() // 1st sample - status[2] = nextStatus(r.Monitor) // 2nd sample - status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample - - if n := r.Done(); n != 20 { - t.Fatalf("r.Done() expected 20; got %v", n) - } - - status[4] = r.Status() - status[5] = nextStatus(r.Monitor) // Timeout - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true}, - {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true}, - {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true}, - {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - } - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) - } - } - if !bytes.Equal(b[:20], in[:20]) { - t.Errorf("r.Read() input doesn't match output") - } -} - -func TestWriter(t *testing.T) { - b := make([]byte, 100) - for i := range b { - b[i] = byte(i) - } - w := NewWriter(&bytes.Buffer{}, 200) - start := time.Now() - - // Make sure w implements Limiter - _ = Limiter(w) - - // Non-blocking 20-byte write for the first sample returns ErrLimit - w.SetBlocking(false) - if n, err := w.Write(b); n != 20 || err != ErrLimit { - 
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _300ms { - // Explanation for `rt < _300ms` (as opposed to `< _400ms`) - // - // |<-- start | | - // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms - // sends: 20|20 |20 |20 |20# - // - // NOTE: The '#' symbol can thus happen before 400ms is up. - // Thus, we can only panic if rt < _300ms. - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, - {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, - } - - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} - -const maxDeviationForDuration = 50 * time.Millisecond -const maxDeviationForRate int64 = 50 - -// statusesAreEqual returns true if s1 is equal to s2. Equality here means -// general equality of fields except for the duration and rates, which can -// drift due to unpredictable delays (e.g. thread wakes up 25ms after -// `time.Sleep` has ended). -func statusesAreEqual(s1 *Status, s2 *Status) bool { - if s1.Active == s2.Active && - s1.Start == s2.Start && - durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && - s1.Idle == s2.Idle && - s1.Bytes == s2.Bytes && - s1.Samples == s2.Samples && - ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && - ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && - ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && - ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && - s1.BytesRem == s2.BytesRem && - durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && - s1.Progress == s2.Progress { - return true - } - return false -} - -func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - return d2-d1 <= maxDeviation -} - -func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { - sub := r1 - r2 - if sub < 0 { - sub = -sub - } - if sub <= maxDeviation { - return true - } - return false -} diff --git a/internal/libs/protoio/io_test.go b/internal/libs/protoio/io_test.go index 2f1437c68..a84b34c00 100644 --- a/internal/libs/protoio/io_test.go +++ b/internal/libs/protoio/io_test.go @@ -71,7 +71,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { return err } if n != len(bz)+visize { - return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) // nolint + return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) } lens[i] = n } diff --git a/internal/libs/queue/queue.go b/internal/libs/queue/queue.go new file mode 100644 index 000000000..7b4199504 --- /dev/null +++ b/internal/libs/queue/queue.go @@ -0,0 +1,232 @@ +// Package queue implements a dynamic FIFO queue with a fixed upper bound 
+// and a flexible quota mechanism to handle bursty load. +package queue + +import ( + "context" + "errors" + "sync" +) + +var ( + // ErrQueueFull is returned by the Add method of a queue when the queue has + // reached its hard capacity limit. + ErrQueueFull = errors.New("queue is full") + + // ErrNoCredit is returned by the Add method of a queue when the queue has + // exceeded its soft quota and there is insufficient burst credit. + ErrNoCredit = errors.New("insufficient burst credit") + + // ErrQueueClosed is returned by the Add method of a closed queue, and by + // the Wait method of a closed empty queue. + ErrQueueClosed = errors.New("queue is closed") + + // Sentinel errors reported by the New constructor. + errHardLimit = errors.New("hard limit must be > 0 and ≥ soft quota") + errBurstCredit = errors.New("burst credit must be non-negative") +) + +// A Queue is a limited-capacity FIFO queue of arbitrary data items. +// +// A queue has a soft quota and a hard limit on the number of items that may be +// contained in the queue. Adding items in excess of the hard limit will fail +// unconditionally. +// +// For items in excess of the soft quota, a credit system applies: Each queue +// maintains a burst credit score. Adding an item in excess of the soft quota +// costs 1 unit of burst credit. If there is not enough burst credit, the add +// will fail. +// +// The initial burst credit is assigned when the queue is constructed. Removing +// items from the queue adds additional credit if the resulting queue length is +// less than the current soft quota. Burst credit is capped by the hard limit. +// +// A Queue is safe for concurrent use by multiple goroutines. +type Queue struct { + mu sync.Mutex // protects the fields below + + softQuota int // adjusted dynamically (see Add, Remove) + hardLimit int // fixed for the lifespan of the queue + queueLen int // number of entries in the queue list + credit float64 // current burst credit + + closed bool + nempty *sync.Cond + back *entry + front *entry + + // The queue is singly-linked. Front points to the sentinel and back points + // to the newest entry. The oldest entry is front.link if it exists. +} + +// New constructs a new empty queue with the specified options. It reports an +// error if any of the option values are invalid. +func New(opts Options) (*Queue, error) { + if opts.HardLimit <= 0 || opts.HardLimit < opts.SoftQuota { + return nil, errHardLimit + } + if opts.BurstCredit < 0 { + return nil, errBurstCredit + } + if opts.SoftQuota <= 0 { + opts.SoftQuota = opts.HardLimit + } + if opts.BurstCredit == 0 { + opts.BurstCredit = float64(opts.SoftQuota) + } + sentinel := new(entry) + q := &Queue{ + softQuota: opts.SoftQuota, + hardLimit: opts.HardLimit, + credit: opts.BurstCredit, + back: sentinel, + front: sentinel, + } + q.nempty = sync.NewCond(&q.mu) + return q, nil +} + +// Add adds item to the back of the queue. It reports an error and does not +// enqueue the item if the queue is full or closed, or if it exceeds its soft +// quota and there is not enough burst credit. +func (q *Queue) Add(item interface{}) error { + q.mu.Lock() + defer q.mu.Unlock() + + if q.closed { + return ErrQueueClosed + } + + if q.queueLen >= q.softQuota { + if q.queueLen == q.hardLimit { + return ErrQueueFull + } else if q.credit < 1 { + return ErrNoCredit + } + + // Successfully exceeding the soft quota deducts burst credit and raises + // the soft quota. 
This has the effect of reducing the credit cap and the + // amount of credit given for removing items to better approximate the + // rate at which the consumer is servicing the queue. + q.credit-- + q.softQuota = q.queueLen + 1 + } + e := &entry{item: item} + q.back.link = e + q.back = e + q.queueLen++ + if q.queueLen == 1 { // was empty + q.nempty.Signal() + } + return nil +} + +// Remove removes and returns the frontmost (oldest) item in the queue and +// reports whether an item was available. If the queue is empty, Remove +// returns nil, false. +func (q *Queue) Remove() (interface{}, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if q.queueLen == 0 { + return nil, false + } + return q.popFront(), true +} + +// Wait blocks until q is non-empty or closed, and then returns the frontmost +// (oldest) item from the queue. If ctx ends before an item is available, Wait +// returns a nil value and a context error. If the queue is closed while it is +// still empty, Wait returns nil, ErrQueueClosed. +func (q *Queue) Wait(ctx context.Context) (interface{}, error) { + // If the context terminates, wake the waiter. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { <-ctx.Done(); q.nempty.Broadcast() }() + + q.mu.Lock() + defer q.mu.Unlock() + + for q.queueLen == 0 { + if q.closed { + return nil, ErrQueueClosed + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + q.nempty.Wait() + } + } + return q.popFront(), nil +} + +// Close closes the queue. After closing, any further Add calls will report an +// error, but items that were added to the queue prior to closing will still be +// available for Remove and Wait. Wait will report an error without blocking if +// it is called on a closed, empty queue. +func (q *Queue) Close() error { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.nempty.Broadcast() + return nil +} + +// popFront removes the frontmost item of q and returns its value after +// updating quota and credit settings. +// +// Preconditions: The caller holds q.mu and q is not empty. +func (q *Queue) popFront() interface{} { + e := q.front.link + q.front.link = e.link + if e == q.back { + q.back = q.front + } + q.queueLen-- + + if q.queueLen < q.softQuota { + // Successfully removing items from the queue below half the soft quota + // lowers the soft quota. This has the effect of increasing the credit cap + // and the amount of credit given for removing items to better approximate + // the rate at which the consumer is servicing the queue. + if q.softQuota > 1 && q.queueLen < q.softQuota/2 { + q.softQuota-- + } + + // Give credit for being below the soft quota. Note we do this after + // adjusting the quota so the credit reflects the item we just removed. + q.credit += float64(q.softQuota-q.queueLen) / float64(q.softQuota) + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + q.credit = cap + } + } + + return e.item +} + +// Options are the initial settings for a Queue. +type Options struct { + // The maximum number of items the queue will ever be permitted to hold. + // This value must be positive, and greater than or equal to SoftQuota. The + // hard limit is fixed and does not change as the queue is used. + // + // The hard limit should be chosen to exceed the largest burst size expected + // under normal operating conditions. + HardLimit int + + // The initial expected maximum number of items the queue should contain on + // an average workload. If this value is zero, it is initialized to the hard + // limit. 
The soft quota is adjusted from the initial value dynamically as + // the queue is used. + SoftQuota int + + // The initial burst credit score. This value must be greater than or equal + // to zero. If it is zero, the soft quota is used. + BurstCredit float64 +} + +type entry struct { + item interface{} + link *entry +} diff --git a/internal/libs/queue/queue_test.go b/internal/libs/queue/queue_test.go new file mode 100644 index 000000000..f339e07fa --- /dev/null +++ b/internal/libs/queue/queue_test.go @@ -0,0 +1,188 @@ +package queue + +import ( + "context" + "testing" + "time" +) + +func TestNew(t *testing.T) { + tests := []struct { + desc string + opts Options + want error + }{ + {"empty options", Options{}, errHardLimit}, + {"zero limit negative quota", Options{SoftQuota: -1}, errHardLimit}, + {"zero limit and quota", Options{SoftQuota: 0}, errHardLimit}, + {"zero limit", Options{SoftQuota: 1, HardLimit: 0}, errHardLimit}, + {"limit less than quota", Options{SoftQuota: 5, HardLimit: 3}, errHardLimit}, + {"negative credit", Options{SoftQuota: 1, HardLimit: 1, BurstCredit: -6}, errBurstCredit}, + {"valid default credit", Options{SoftQuota: 1, HardLimit: 2, BurstCredit: 0}, nil}, + {"valid explicit credit", Options{SoftQuota: 1, HardLimit: 5, BurstCredit: 10}, nil}, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + got, err := New(test.opts) + if err != test.want { + t.Errorf("New(%+v): got (%+v, %v), want err=%v", test.opts, got, err, test.want) + } + }) + } +} + +type testQueue struct { + t *testing.T + *Queue +} + +func (q testQueue) mustAdd(item string) { + q.t.Helper() + if err := q.Add(item); err != nil { + q.t.Errorf("Add(%q): unexpected error: %v", item, err) + } +} + +func (q testQueue) mustRemove(want string) { + q.t.Helper() + got, ok := q.Remove() + if !ok { + q.t.Error("Remove: queue is empty") + } else if got.(string) != want { + q.t.Errorf("Remove: got %q, want %q", got, want) + } +} + +func mustQueue(t *testing.T, opts Options) testQueue { + t.Helper() + + q, err := New(opts) + if err != nil { + t.Fatalf("New(%+v): unexpected error: %v", opts, err) + } + return testQueue{t: t, Queue: q} +} + +func TestHardLimit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 1}) + q.mustAdd("foo") + if err := q.Add("bar"); err != ErrQueueFull { + t.Errorf("Add: got err=%v, want %v", err, ErrQueueFull) + } +} + +func TestSoftQuota(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 4}) + q.mustAdd("foo") + q.mustAdd("bar") + if err := q.Add("baz"); err != ErrNoCredit { + t.Errorf("Add: got err=%v, want %v", err, ErrNoCredit) + } +} + +func TestBurstCredit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 5}) + q.mustAdd("foo") + q.mustAdd("bar") + + // We should still have all our initial credit. + if q.credit < 2 { + t.Errorf("Wrong credit: got %f, want ≥ 2", q.credit) + } + + // Removing an item below soft quota should increase our credit. + q.mustRemove("foo") + if q.credit <= 2 { + t.Errorf("wrong credit: got %f, want > 2", q.credit) + } + + // Credit should be capped by the hard limit. 
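[Editor's note] The soft-quota and burst-credit rules documented in queue.go above are easiest to follow from the caller's side. A minimal, hypothetical usage sketch; the option values are illustrative, and as with the inspect example this assumes code inside the repo because the package is internal:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/internal/libs/queue"
)

func main() {
	q, err := queue.New(queue.Options{
		SoftQuota: 10, // expected steady-state depth
		HardLimit: 50, // absolute cap; Add fails unconditionally beyond this
	})
	if err != nil {
		panic(err) // invalid options
	}

	// Add fails fast with ErrQueueFull, ErrNoCredit, or ErrQueueClosed.
	if err := q.Add("job"); err != nil {
		fmt.Println("rejected:", err)
	}

	// Wait returns the oldest item, blocking until ctx ends or the queue closes.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if item, err := q.Wait(ctx); err == nil {
		fmt.Println("got:", item.(string))
	}
}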
+ q.mustRemove("bar") + q.mustAdd("baz") + q.mustRemove("baz") + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + t.Errorf("Wrong credit: got %f, want ≤ %f", q.credit, cap) + } +} + +func TestClose(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 10}) + q.mustAdd("alpha") + q.mustAdd("bravo") + q.mustAdd("charlie") + q.Close() + + // After closing the queue, subsequent writes should fail. + if err := q.Add("foxtrot"); err == nil { + t.Error("Add should have failed after Close") + } + + // However, the remaining contents of the queue should still work. + q.mustRemove("alpha") + q.mustRemove("bravo") + q.mustRemove("charlie") +} + +func TestWait(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 2}) + + // A wait on an empty queue should time out. + t.Run("WaitTimeout", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + got, err := q.Wait(ctx) + if err == nil { + t.Errorf("Wait: got %v, want error", got) + } else { + t.Logf("Wait correctly failed: %v", err) + } + }) + + // A wait on a non-empty queue should report an item. + t.Run("WaitNonEmpty", func(t *testing.T) { + const input = "figgy pudding" + q.mustAdd(input) + + got, err := q.Wait(context.Background()) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }) + + // Wait should block until an item arrives. + t.Run("WaitOnEmpty", func(t *testing.T) { + const input = "fleet footed kittens" + + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(context.Background()) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }() + + q.mustAdd(input) + <-done + }) + + // Closing the queue unblocks a wait. 
+ t.Run("UnblockOnClose", func(t *testing.T) { + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(context.Background()) + if err != ErrQueueClosed { + t.Errorf("Wait: got (%v, %v), want %v", got, err, ErrQueueClosed) + } + }() + + q.Close() + <-done + }) +} diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go index 637d6fbb1..21b5130ba 100644 --- a/internal/libs/sync/deadlock.go +++ b/internal/libs/sync/deadlock.go @@ -1,3 +1,4 @@ +//go:build deadlock // +build deadlock package sync diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go index a0880e7de..c6e7101c6 100644 --- a/internal/libs/sync/sync.go +++ b/internal/libs/sync/sync.go @@ -1,3 +1,4 @@ +//go:build !deadlock // +build !deadlock package sync diff --git a/internal/libs/tempfile/tempfile_test.go b/internal/libs/tempfile/tempfile_test.go index 5650fe720..212525d44 100644 --- a/internal/libs/tempfile/tempfile_test.go +++ b/internal/libs/tempfile/tempfile_test.go @@ -5,10 +5,9 @@ package tempfile import ( "bytes" "fmt" - "io/ioutil" mrand "math/rand" "os" - testing "testing" + "testing" "github.com/stretchr/testify/require" @@ -22,13 +21,13 @@ func TestWriteFileAtomic(t *testing.T) { perm os.FileMode = 0600 ) - f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + f, err := os.CreateTemp("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0600); err != nil { t.Fatal(err) } @@ -36,7 +35,7 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } - rData, err := ioutil.ReadFile(f.Name()) + rData, err := os.ReadFile(f.Name()) if err != nil { t.Fatal(err) } @@ -81,11 +80,11 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) require.NoError(t, err) // Check that the first atomic file was untouched - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten") // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") @@ -132,14 +131,14 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.Nil(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.Nil(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") } diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 43174f106..3cd45d2bc 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -31,14 +31,14 @@ var _ TxCache = 
(*LRUTxCache)(nil) type LRUTxCache struct { mtx tmsync.Mutex size int - cacheMap map[[TxKeySize]byte]*list.Element + cacheMap map[types.TxKey]*list.Element list *list.List } func NewLRUTxCache(cacheSize int) *LRUTxCache { return &LRUTxCache{ size: cacheSize, - cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize), + cacheMap: make(map[types.TxKey]*list.Element, cacheSize), list: list.New(), } } @@ -53,7 +53,7 @@ func (c *LRUTxCache) Reset() { c.mtx.Lock() defer c.mtx.Unlock() - c.cacheMap = make(map[[TxKeySize]byte]*list.Element, c.size) + c.cacheMap = make(map[types.TxKey]*list.Element, c.size) c.list.Init() } @@ -61,7 +61,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() moved, ok := c.cacheMap[key] if ok { @@ -72,7 +72,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { if c.list.Len() >= c.size { front := c.list.Front() if front != nil { - frontKey := front.Value.([TxKeySize]byte) + frontKey := front.Value.(types.TxKey) delete(c.cacheMap, frontKey) c.list.Remove(front) } @@ -88,7 +88,7 @@ func (c *LRUTxCache) Remove(tx types.Tx) { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() e := c.cacheMap[key] delete(c.cacheMap, key) diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 49a9ac607..656f5b74c 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -7,17 +7,15 @@ import ( "github.com/tendermint/tendermint/types" ) -// nolint: golint -// TODO: Rename type. -type MempoolIDs struct { +type IDs struct { mtx tmsync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } -func NewMempoolIDs() *MempoolIDs { - return &MempoolIDs{ +func NewMempoolIDs() *IDs { + return &IDs{ peerMap: make(map[types.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx @@ -28,7 +26,7 @@ func NewMempoolIDs() *MempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. -func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { +func (ids *IDs) ReserveForPeer(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -38,7 +36,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { +func (ids *IDs) Reclaim(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -50,7 +48,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { } // GetForPeer returns an ID reserved for the peer. -func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { +func (ids *IDs) GetForPeer(peerID types.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() @@ -59,7 +57,7 @@ func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { // nextPeerID returns the next unused peer ID to use. We assume that the mutex // is already held. 
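[Editor's note] With the switch to types.TxKey in cache.go above, cache keys are derived directly from the transaction via tx.Key(). An illustrative fragment only, not part of this change:

// Editor's sketch: duplicate suppression with the LRU transaction cache.
cache := mempool.NewLRUTxCache(1000)
tx := types.Tx("payload")
if fresh := cache.Push(tx); !fresh {
	// tx.Key() was already cached; treat the transaction as a duplicate
}
cache.Remove(tx) // e.g. to allow re-submission after a failed CheckTx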
-func (ids *MempoolIDs) nextPeerID() uint16 { +func (ids *IDs) nextPeerID() uint16 { if len(ids.activeIDs) == MaxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs)) } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index d679b3506..ec7ef2e15 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,139 +1,869 @@ package mempool import ( + "bytes" "context" + "errors" "fmt" - "math" + "reflect" + "sync/atomic" + "time" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) -const ( - MempoolChannel = p2p.ChannelID(0x30) +var _ Mempool = (*TxMempool)(nil) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind - PeerCatchupSleepIntervalMS = 100 +// TxMempoolOption sets an optional parameter on the TxMempool. +type TxMempoolOption func(*TxMempool) - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) - UnknownPeerID uint16 = 0 +// TxMempool defines a prioritized mempool data structure used by the v1 mempool +// reactor. It keeps a thread-safe priority queue of transactions that is used +// when a block proposer constructs a block and a thread-safe linked-list that +// is used to gossip transactions to peers in a FIFO manner. +type TxMempool struct { + logger log.Logger + metrics *Metrics + config *config.MempoolConfig + proxyAppConn proxy.AppConnMempool - MaxActiveIDs = math.MaxUint16 -) + // txsAvailable fires once for each height when the mempool is not empty + txsAvailable chan struct{} + notifiedTxsAvailable bool -// Mempool defines the mempool interface. + // height defines the last block height processed during Update() + height int64 + + // sizeBytes defines the total size of the mempool (sum of all tx bytes) + sizeBytes int64 + + // cache defines a fixed-size cache of already seen transactions as this + // reduces pressure on the proxyApp. + cache TxCache + + // txStore defines the main storage of valid transactions. Indexes are built + // on top of this store. + txStore *TxStore + + // gossipIndex defines the gossiping index of valid transactions via a + // thread-safe linked-list. We also use the gossip index as a cursor for + // rechecking transactions already in the mempool. + gossipIndex *clist.CList + + // recheckCursor and recheckEnd are used as cursors based on the gossip index + // to recheck transactions that are already in the mempool. Iteration is not + // thread-safe and transactions may be mutated in serial order. + // + // XXX/TODO: It might be somewhat of a code smell to use the gossip index for + // iterator and cursor management when rechecking transactions. If the gossip + // index changes or is removed in a future refactor, this will have to be + // refactored. Instead, we should consider just keeping a slice of a snapshot + // of the mempool's current transactions during Update and an integer cursor + // into that slice. This, however, requires additional O(n) space complexity.
+ recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // priorityIndex defines the priority index of valid transactions via a + // thread-safe priority queue. + priorityIndex *TxPriorityQueue + + // heightIndex defines a height-based, in ascending order, transaction index. + // i.e. older transactions are first. + heightIndex *WrappedTxList + + // timestampIndex defines a timestamp-based, in ascending order, transaction + // index. i.e. older transactions are first. + timestampIndex *WrappedTxList + + // A read/write lock is used to safeguard updates, insertions and deletions + // from the mempool. A read-lock is implicitly acquired when executing CheckTx; + // however, a caller must explicitly grab a write-lock via Lock when updating + // the mempool via Update(). + mtx tmsync.RWMutex + preCheck PreCheckFunc + postCheck PostCheckFunc +} + +func NewTxMempool( + logger log.Logger, + cfg *config.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...TxMempoolOption, +) *TxMempool { + + txmp := &TxMempool{ + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + height: height, + cache: NopTxCache{}, + metrics: NopMetrics(), + txStore: NewTxStore(), + gossipIndex: clist.New(), + priorityIndex: NewTxPriorityQueue(), + heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.height >= wtx2.height + }), + timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) + }), + } + + if cfg.CacheSize > 0 { + txmp.cache = NewLRUTxCache(cfg.CacheSize) + } + + proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) + + for _, opt := range options { + opt(txmp) + } + + return txmp +} + +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. +func WithPreCheck(f PreCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.preCheck = f } +} + +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value. +func WithPostCheck(f PostCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.postCheck = f } +} + +// WithMetrics sets the mempool's metrics collector. +func WithMetrics(metrics *Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } +} + +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. +func (txmp *TxMempool) Lock() { + txmp.mtx.Lock() +} + +// Unlock releases a write-lock on the mempool. +func (txmp *TxMempool) Unlock() { + txmp.mtx.Unlock() +} + +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. +func (txmp *TxMempool) Size() int { + return txmp.txStore.Size() +} + +// SizeBytes returns the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. +func (txmp *TxMempool) SizeBytes() int64 { + return atomic.LoadInt64(&txmp.sizeBytes) +} + +// FlushAppConn executes FlushSync on the mempool's proxyAppConn. // -// Updates to the mempool need to be synchronized with committing a block so -// applications can reset their transient state on Commit.
-type Mempool interface { - // CheckTx executes a new transaction against the application to determine - // its validity and whether it should be added to the mempool. - CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - - // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes - // bytes total with the condition that the total gasWanted must be less than - // maxGas. - // - // If both maxes are negative, there is no cap on the size of all returned - // transactions (~ all available transactions). - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - - // ReapMaxTxs reaps up to max transactions from the mempool. If max is - // negative, there is no cap on the size of all returned transactions - // (~ all available transactions). - ReapMaxTxs(max int) types.Txs - - // Lock locks the mempool. The consensus must be able to hold lock to safely - // update. - Lock() - - // Unlock unlocks the mempool. - Unlock() - - // Update informs the mempool that the given txs were committed and can be - // discarded. - // - // NOTE: - // 1. This should be called *after* block is committed by consensus. - // 2. Lock/Unlock must be managed by the caller. - Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn PreCheckFunc, - newPostFn PostCheckFunc, - ) error - - // FlushAppConn flushes the mempool connection to ensure async callback calls - // are done, e.g. from CheckTx. - // - // NOTE: - // 1. Lock/Unlock must be managed by caller. - FlushAppConn() error - - // Flush removes all transactions from the mempool and caches. - Flush() - - // TxsAvailable returns a channel which fires once for every height, and only - // when transactions are available in the mempool. - // - // NOTE: - // 1. The returned channel may be nil if EnableTxsAvailable was not called. - TxsAvailable() <-chan struct{} - - // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will - // trigger once every height when transactions are available. - EnableTxsAvailable() - - // Size returns the number of transactions in the mempool. - Size() int - - // SizeBytes returns the total size of all txs in the mempool. - SizeBytes() int64 +// NOTE: The caller must obtain a write-lock prior to execution. +func (txmp *TxMempool) FlushAppConn() error { + return txmp.proxyAppConn.FlushSync(context.Background()) } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. -type PreCheckFunc func(types.Tx) error +// WaitForNextTx returns a blocking channel that will be closed when the next +// valid transaction is available to gossip. It is thread-safe. +func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { + return txmp.gossipIndex.WaitChan() +} -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. -type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// NextGossipTx returns the next valid transaction to gossip. A caller must wait +// for WaitForNextTx to signal a transaction is available to gossip first. It is +// thread-safe. 
+func (txmp *TxMempool) NextGossipTx() *clist.CElement { + return txmp.gossipIndex.Front() +} -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. -func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { - return func(tx types.Tx) error { - txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block by block basis. +func (txmp *TxMempool) EnableTxsAvailable() { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - if txSize > maxBytes { - return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) + txmp.txsAvailable = make(chan struct{}, 1) +} + +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + return txmp.txsAvailable +} + +// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires +// a read-lock and attempts to execute the application's CheckTx ABCI method via +// CheckTxAsync. We return an error if any of the following happen: +// +// - The CheckTxAsync execution fails. +// - The transaction already exists in the cache and we've already received the +// transaction from the peer. Otherwise, if it solely exists in the cache, we +// return nil. +// - The transaction size exceeds the maximum transaction size as defined by the +// configuration provided to the mempool. +// - The transaction fails Pre-Check (if it is defined). +// - The proxyAppConn fails, e.g. the buffer is full. +// +// If the mempool is full, we still execute CheckTx and attempt to find a lower +// priority transaction to evict. If such a transaction exists, we remove the +// lower priority transaction and add the new one with higher priority. +// +// NOTE: +// - The applications' CheckTx implementation may panic. +// - The caller is not required to hold any locks when executing CheckTx. +func (txmp *TxMempool) CheckTx( + ctx context.Context, + tx types.Tx, + cb func(*abci.Response), + txInfo TxInfo, +) error { + if ctx == nil { + ctx = context.TODO() + } + + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + if txSize := len(tx); txSize > txmp.config.MaxTxBytes { + return types.ErrTxTooLarge{ + Max: txmp.config.MaxTxBytes, + Actual: txSize, + } + } + + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return types.ErrPreCheck{Reason: err} + } + } + + if err := txmp.proxyAppConn.Error(); err != nil { + return err + } + + txHash := tx.Key() + + // We add the transaction to the mempool's cache and if the transaction already + // exists, i.e. false is returned, then we check if we've seen this transaction + // from the same sender and error if we have. Otherwise, we return nil. + if !txmp.cache.Push(tx) { + wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) + if wtx != nil && ok { + // We already have the transaction stored and we've already seen this + // transaction from txInfo.SenderID.
+ return types.ErrTxInCache + } + + txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) + return nil + } + + reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) + if err != nil { + txmp.cache.Remove(tx) + return err + } + + reqRes.SetCallback(func(res *abci.Response) { + if txmp.recheckCursor != nil { + panic("recheck cursor is non-nil in CheckTx callback") + } + + wtx := &WrappedTx{ + tx: tx, + hash: txHash, + timestamp: time.Now().UTC(), + height: txmp.height, + } + txmp.initTxCallback(wtx, res, txInfo) + + if cb != nil { + cb(res) + } + }) + + return nil +} + +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) + return nil + } + + return errors.New("transaction not found") +} + +// Flush empties the mempool. It acquires a read-lock, fetches all the +// transactions currently in the transaction store, removes each transaction +// from the store and all indexes, and finally resets the cache. +// +// NOTE: +// - Flushing the mempool may leave the mempool in an inconsistent state. +func (txmp *TxMempool) Flush() { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + txmp.heightIndex.Reset() + txmp.timestampIndex.Reset() + + for _, wtx := range txmp.txStore.GetAllTxs() { + txmp.removeTx(wtx, false) + } + + atomic.SwapInt64(&txmp.sizeBytes, 0) + txmp.cache.Reset() +} + +// ReapMaxBytesMaxGas returns a list of transactions within the provided size +// and gas constraints. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + var ( + totalGas int64 + totalSize int64 + ) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + }() + + txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) + for txmp.priorityIndex.NumTxs() > 0 { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + + // Ensure we have capacity for the transaction with respect to the + // transaction size. + if maxBytes > -1 && totalSize+size > maxBytes { + return txs[:len(txs)-1] + } + + totalSize += size + + // ensure we have capacity for the transaction with respect to total gas + gas := totalGas + wtx.gasWanted + if maxGas > -1 && gas > maxGas { + return txs[:len(txs)-1] + } + + totalGas = gas + } + + return txs +} + +// ReapMaxTxs returns a list of transactions, bounded by the provided maximum +// number of transactions. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + numTxs := txmp.priorityIndex.NumTxs() + if max < 0 { + max = numTxs + } + + cap := tmmath.MinInt(numTxs, max) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning.
+func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error {
+	txmp.Lock()
+	defer txmp.Unlock()
+
+	// remove the committed transaction from the transaction store and indexes
+	if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil {
+		txmp.removeTx(wtx, false)
+		return nil
+	}
+
+	return errors.New("transaction not found")
+}
+
+// Flush empties the mempool. It acquires a read-lock, fetches all the
+// transactions currently in the transaction store, removes each transaction
+// from the store and all indexes, and finally resets the cache.
+//
+// NOTE:
+// - Flushing the mempool may leave the mempool in an inconsistent state.
+func (txmp *TxMempool) Flush() {
+	txmp.mtx.RLock()
+	defer txmp.mtx.RUnlock()
+
+	txmp.heightIndex.Reset()
+	txmp.timestampIndex.Reset()
+
+	for _, wtx := range txmp.txStore.GetAllTxs() {
+		txmp.removeTx(wtx, false)
+	}
+
+	atomic.SwapInt64(&txmp.sizeBytes, 0)
+	txmp.cache.Reset()
+}
+
+// ReapMaxBytesMaxGas returns a list of transactions within the provided size
+// and gas constraints. Transactions are retrieved in priority order.
+//
+// NOTE:
+// - Transactions returned are not removed from the mempool transaction
+//   store or indexes.
+func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
+	txmp.mtx.RLock()
+	defer txmp.mtx.RUnlock()
+
+	var (
+		totalGas  int64
+		totalSize int64
+	)
+
+	// wTxs contains a list of *WrappedTx retrieved from the priority queue that
+	// need to be re-enqueued prior to returning.
+	wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs())
+	defer func() {
+		for _, wtx := range wTxs {
+			txmp.priorityIndex.PushTx(wtx)
+		}
+	}()
+
+	txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs())
+	for txmp.priorityIndex.NumTxs() > 0 {
+		wtx := txmp.priorityIndex.PopTx()
+		txs = append(txs, wtx.tx)
+		wTxs = append(wTxs, wtx)
+		size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx})
+
+		// Ensure we have capacity for the transaction with respect to the
+		// transaction size.
+		if maxBytes > -1 && totalSize+size > maxBytes {
+			return txs[:len(txs)-1]
+		}
+
+		totalSize += size
+
+		// Ensure we have capacity for the transaction with respect to total gas.
+		gas := totalGas + wtx.gasWanted
+		if maxGas > -1 && gas > maxGas {
+			return txs[:len(txs)-1]
+		}
+
+		totalGas = gas
+	}
+
+	return txs
+}
+
+// ReapMaxTxs returns a list of up to max transactions. Transactions are
+// retrieved in priority order.
+//
+// NOTE:
+// - Transactions returned are not removed from the mempool transaction
+//   store or indexes.
+func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs {
+	txmp.mtx.RLock()
+	defer txmp.mtx.RUnlock()
+
+	numTxs := txmp.priorityIndex.NumTxs()
+	if max < 0 {
+		max = numTxs
+	}
+
+	cap := tmmath.MinInt(numTxs, max)
+
+	// wTxs contains a list of *WrappedTx retrieved from the priority queue that
+	// need to be re-enqueued prior to returning.
+	wTxs := make([]*WrappedTx, 0, cap)
+	txs := make([]types.Tx, 0, cap)
+	for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max {
+		wtx := txmp.priorityIndex.PopTx()
+		txs = append(txs, wtx.tx)
+		wTxs = append(wTxs, wtx)
+	}
+	for _, wtx := range wTxs {
+		txmp.priorityIndex.PushTx(wtx)
+	}
+	return txs
+}
+
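A hedged sketch of proposer-side reaping with the two methods above; maxBlockBytes and maxBlockGas stand in for limits the consensus layer would supply:

```go
// Sketch: reaping candidate transactions for a proposal. Reaping never
// removes anything from the mempool; removal happens via Update.
func proposalTxs(txmp *TxMempool, maxBlockBytes, maxBlockGas int64) types.Txs {
	// Passing -1 for either argument disables that constraint.
	return txmp.ReapMaxBytesMaxGas(maxBlockBytes, maxBlockGas)
}
```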
+// Update iterates over all the transactions provided by the block producer,
+// removes them from the cache (if applicable), and removes them from the main
+// transaction store and associated indexes. If there are transactions remaining
+// in the mempool, we initiate a re-CheckTx for them (if applicable), otherwise,
+// we notify the caller that transactions are available.
+//
+// NOTE:
+// - The caller must explicitly acquire a write-lock.
+func (txmp *TxMempool) Update(
+	blockHeight int64,
+	blockTxs types.Txs,
+	deliverTxResponses []*abci.ResponseDeliverTx,
+	newPreFn PreCheckFunc,
+	newPostFn PostCheckFunc,
+) error {
+
+	txmp.height = blockHeight
+	txmp.notifiedTxsAvailable = false
+
+	if newPreFn != nil {
+		txmp.preCheck = newPreFn
+	}
+	if newPostFn != nil {
+		txmp.postCheck = newPostFn
+	}
+
+	for i, tx := range blockTxs {
+		if deliverTxResponses[i].Code == abci.CodeTypeOK {
+			// add the valid committed transaction to the cache (if missing)
+			_ = txmp.cache.Push(tx)
+		} else if !txmp.config.KeepInvalidTxsInCache {
+			// allow invalid transactions to be re-submitted
+			txmp.cache.Remove(tx)
+		}
+
+		// remove the committed transaction from the transaction store and indexes
+		if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil {
+			txmp.removeTx(wtx, false)
+		}
+	}
+
+	txmp.purgeExpiredTxs(blockHeight)
+
+	// If there are any uncommitted transactions left in the mempool, we either
+	// initiate re-CheckTx per remaining transaction or notify that remaining
+	// transactions are left.
+	if txmp.Size() > 0 {
+		if txmp.config.Recheck {
+			txmp.logger.Debug(
+				"executing re-CheckTx for all remaining transactions",
+				"num_txs", txmp.Size(),
+				"height", blockHeight,
+			)
+			txmp.updateReCheckTxs()
+		} else {
+			txmp.notifyTxsAvailable()
+		}
+	}
+
+	txmp.metrics.Size.Set(float64(txmp.Size()))
+	return nil
+}
+
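The commit-side counterpart, sketched under the assumption that the caller (consensus) manages the write-lock as the NOTE above requires:

```go
// Sketch: applying a committed block to the mempool.
func onBlockCommitted(txmp *TxMempool, height int64, committedTxs types.Txs) error {
	txmp.Lock()
	defer txmp.Unlock()

	resps := make([]*abci.ResponseDeliverTx, len(committedTxs))
	for i := range resps {
		resps[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// Passing nil for the pre/post-check funcs leaves any existing hooks in place.
	return txmp.Update(height, committedTxs, resps, nil, nil)
}
```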
+// initTxCallback is the callback invoked for a new unique transaction after CheckTx
+// has been executed by the ABCI application for the first time on that transaction.
+// CheckTx can be called again for the same transaction later when re-checking;
+// however, this callback will not be called.
+//
+// initTxCallback runs after the ABCI application executes CheckTx.
+// It runs the postCheck hook if one is defined on the mempool.
+// If the CheckTx response code is not OK, or if the postCheck hook
+// reports an error, the transaction is rejected. Otherwise, we attempt to insert
+// the transaction into the mempool.
+//
+// When inserting a transaction, we first check if there is sufficient capacity.
+// If there is, the transaction is added to the txStore and all indexes.
+// Otherwise, if the mempool is full, we attempt to find a lower priority transaction
+// to evict in place of the new incoming transaction. If no such transaction exists,
+// the new incoming transaction is rejected.
+//
+// NOTE:
+// - An explicit lock is NOT required.
+func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo TxInfo) {
+	checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
+	if !ok {
+		return
+	}
+
+	var err error
+	if txmp.postCheck != nil {
+		err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
+	}
+
+	if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK {
+		// ignore bad transactions
+		txmp.logger.Info(
+			"rejected bad transaction",
+			"priority", wtx.priority,
+			"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+			"peer_id", txInfo.SenderNodeID,
+			"code", checkTxRes.CheckTx.Code,
+			"post_check_err", err,
+		)
+
+		txmp.metrics.FailedTxs.Add(1)
+
+		if !txmp.config.KeepInvalidTxsInCache {
+			txmp.cache.Remove(wtx.tx)
+		}
+		if err != nil {
+			checkTxRes.CheckTx.MempoolError = err.Error()
+		}
+		return
+	}
+
+	sender := checkTxRes.CheckTx.Sender
+	priority := checkTxRes.CheckTx.Priority
+
+	if len(sender) > 0 {
+		if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
+			txmp.logger.Error(
+				"rejected incoming good transaction; tx already exists for sender",
+				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+				"sender", sender,
+			)
+			txmp.metrics.RejectedTxs.Add(1)
+			return
+		}
+	}
+
+	if err := txmp.canAddTx(wtx); err != nil {
+		evictTxs := txmp.priorityIndex.GetEvictableTxs(
+			priority,
+			int64(wtx.Size()),
+			txmp.SizeBytes(),
+			txmp.config.MaxTxsBytes,
+		)
+		if len(evictTxs) == 0 {
+			// No room for the new incoming transaction, so we just remove it from
+			// the cache.
+			txmp.cache.Remove(wtx.tx)
+			txmp.logger.Error(
+				"rejected incoming good transaction; mempool full",
+				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+				"err", err.Error(),
+			)
+			txmp.metrics.RejectedTxs.Add(1)
+			return
+		}
+
+		// evict existing transaction(s)
+		//
+		// NOTE:
+		// - The transaction, toEvict, can be removed while a concurrent
+		//   reCheckTx callback is being executed for the same transaction.
+		for _, toEvict := range evictTxs {
+			txmp.removeTx(toEvict, true)
+			txmp.logger.Debug(
+				"evicted existing good transaction; mempool full",
+				"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
+				"old_priority", toEvict.priority,
+				"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+				"new_priority", wtx.priority,
+			)
+			txmp.metrics.EvictedTxs.Add(1)
+		}
+	}
+
+	wtx.gasWanted = checkTxRes.CheckTx.GasWanted
+	wtx.priority = priority
+	wtx.sender = sender
+	wtx.peers = map[uint16]struct{}{
+		txInfo.SenderID: {},
+	}
+
+	txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
+	txmp.metrics.Size.Set(float64(txmp.Size()))
+
+	txmp.insertTx(wtx)
+	txmp.logger.Debug(
+		"inserted good transaction",
+		"priority", wtx.priority,
+		"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+		"height", txmp.height,
+		"num_txs", txmp.Size(),
+	)
+	txmp.notifyTxsAvailable()
+}
+
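The postCheck hook consulted above is the main extension point in this path. A sketch of wiring one in; maxGas is the only new parameter, while logger, cfg and appConnMem are assumed names mirroring the test setup below:

```go
// Sketch: a postCheck hook that rejects transactions the application accepted
// but whose gas demand exceeds an illustrative budget.
func newMempoolWithGasCap(maxGas int64) *TxMempool {
	postCheckFn := func(tx types.Tx, res *abci.ResponseCheckTx) error {
		if res.GasWanted > maxGas {
			return fmt.Errorf("gas wanted %d exceeds budget %d", res.GasWanted, maxGas)
		}
		return nil
	}

	// On error, initTxCallback rejects the transaction and surfaces the reason
	// via ResponseCheckTx.MempoolError. logger, cfg and appConnMem are assumed
	// to exist; see setup() in the tests below for real wiring.
	return NewTxMempool(logger, cfg.Mempool, appConnMem, 0, WithPostCheck(postCheckFn))
}
```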
+// defaultTxCallback is the CheckTx application callback used when a transaction
+// is being re-checked (if re-checking is enabled). The caller must hold a mempool
+// write-lock (via Lock()). When executing Update(), if the mempool is non-empty
+// and Recheck is enabled, all remaining transactions will be rechecked via
+// CheckTxAsync. The order transactions are rechecked must be the same as the
+// order in which this callback is called.
+func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) {
+	if txmp.recheckCursor == nil {
+		return
+	}
+
+	txmp.metrics.RecheckTimes.Add(1)
+
+	checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
+	if !ok {
+		txmp.logger.Error("received incorrect type in mempool callback",
+			"expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(),
+			"got", reflect.TypeOf(res.Value).Name(),
+		)
+		return
+	}
+	tx := req.GetCheckTx().Tx
+	wtx := txmp.recheckCursor.Value.(*WrappedTx)
+
+	// Search through the remaining list of txs to recheck for a transaction that
+	// matches the one we received from the ABCI application.
+	for {
+		if bytes.Equal(tx, wtx.tx) {
+			// We've found a tx in the recheck list that matches the tx that we
+			// received from the ABCI application.
+			// Break, and use this transaction for further checks.
+			break
+		}
+
+		txmp.logger.Error(
+			"re-CheckTx transaction mismatch",
+			"got", wtx.tx.Hash(),
+			"expected", types.Tx(tx).Key(),
+		)
+
+		if txmp.recheckCursor == txmp.recheckEnd {
+			// We reached the end of the recheckTx list without finding a tx
+			// matching the one we received from the ABCI application.
+			// Return without processing any tx.
+			txmp.recheckCursor = nil
+			return
+		}
+
+		txmp.recheckCursor = txmp.recheckCursor.Next()
+		wtx = txmp.recheckCursor.Value.(*WrappedTx)
+	}
+
+	// Only evaluate transactions that have not been removed. This can happen
+	// if an existing transaction is evicted during CheckTx and while this
+	// callback is being executed for the same evicted transaction.
+	if !txmp.txStore.IsTxRemoved(wtx.hash) {
+		var err error
+		if txmp.postCheck != nil {
+			err = txmp.postCheck(tx, checkTxRes.CheckTx)
+		}
+
+		if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
+			wtx.priority = checkTxRes.CheckTx.Priority
+		} else {
+			txmp.logger.Debug(
+				"existing transaction no longer valid; failed re-CheckTx callback",
+				"priority", wtx.priority,
+				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+				"err", err,
+				"code", checkTxRes.CheckTx.Code,
+			)
+
+			if wtx.gossipEl != txmp.recheckCursor {
+				panic("corrupted reCheckTx cursor")
+			}
+
+			txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache)
+		}
+	}
+
+	// move reCheckTx cursor to next element
+	if txmp.recheckCursor == txmp.recheckEnd {
+		txmp.recheckCursor = nil
+	} else {
+		txmp.recheckCursor = txmp.recheckCursor.Next()
+	}
+
+	if txmp.recheckCursor == nil {
+		txmp.logger.Debug("finished rechecking transactions")
+
+		if txmp.Size() > 0 {
+			txmp.notifyTxsAvailable()
+		}
+	}
+
+	txmp.metrics.Size.Set(float64(txmp.Size()))
+}
+
+// updateReCheckTxs updates the recheck cursors using the gossipIndex. For
+// each transaction, it executes CheckTxAsync. The global callback defined on
+// the proxyAppConn will be executed for each transaction after CheckTx is
+// executed.
+//
+// NOTE:
+// - The caller must have a write-lock when executing updateReCheckTxs.
+func (txmp *TxMempool) updateReCheckTxs() {
+	if txmp.Size() == 0 {
+		panic("attempted to update re-CheckTx txs when mempool is empty")
+	}
+
+	txmp.recheckCursor = txmp.gossipIndex.Front()
+	txmp.recheckEnd = txmp.gossipIndex.Back()
+	ctx := context.Background()
+
+	for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() {
+		wtx := e.Value.(*WrappedTx)
+
+		// Only execute CheckTx if the transaction is not marked as removed, which
+		// could happen if the transaction was evicted.
+		if !txmp.txStore.IsTxRemoved(wtx.hash) {
+			_, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
+				Tx:   wtx.tx,
+				Type: abci.CheckTxType_Recheck,
+			})
+			if err != nil {
+				// no need to retry since the tx will be rechecked after the next block
+				txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err)
+			}
+		}
+	}
+
+	if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil {
+		txmp.logger.Error("failed to flush transactions during rechecking", "err", err)
+	}
 }
 
-// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed
-// maxGas. Returns nil if maxGas is -1.
-func PostCheckMaxGas(maxGas int64) PostCheckFunc {
-	return func(tx types.Tx, res *abci.ResponseCheckTx) error {
-		if maxGas == -1 {
-			return nil
-		}
-		if res.GasWanted < 0 {
-			return fmt.Errorf("gas wanted %d is negative",
-				res.GasWanted)
-		}
-		if res.GasWanted > maxGas {
-			return fmt.Errorf("gas wanted %d is greater than max gas %d",
-				res.GasWanted, maxGas)
-		}
+// canAddTx returns an error if we cannot insert the provided *WrappedTx into
+// the mempool due to the mempool's configured constraints. If it returns nil,
+// the transaction can be inserted into the mempool.
+func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
+	var (
+		numTxs    = txmp.Size()
+		sizeBytes = txmp.SizeBytes()
+	)
 
-		return nil
+	if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes {
+		return types.ErrMempoolIsFull{
+			NumTxs:      numTxs,
+			MaxTxs:      txmp.config.Size,
+			TxsBytes:    sizeBytes,
+			MaxTxsBytes: txmp.config.MaxTxsBytes,
+		}
+	}
+
+	return nil
+}
+
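The bound canAddTx enforces reduces to a two-part budget check; restated as a sketch, with cfg an assumed *config.MempoolConfig:

```go
// Sketch of the admission bound: a transaction fits only while both the
// transaction-count and total-bytes budgets hold.
func fits(cfg *config.MempoolConfig, numTxs int, sizeBytes, txBytes int64) bool {
	return numTxs < cfg.Size && sizeBytes+txBytes <= cfg.MaxTxsBytes
}
```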
+func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
+	txmp.txStore.SetTx(wtx)
+	txmp.priorityIndex.PushTx(wtx)
+	txmp.heightIndex.Insert(wtx)
+	txmp.timestampIndex.Insert(wtx)
+
+	// Insert the transaction into the gossip index and mark the reference to the
+	// linked-list element, which will be needed at a later point when the
+	// transaction is removed.
+	gossipEl := txmp.gossipIndex.PushBack(wtx)
+	wtx.gossipEl = gossipEl
+
+	atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size()))
+}
+
+func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
+	if txmp.txStore.IsTxRemoved(wtx.hash) {
+		return
+	}
+
+	txmp.txStore.RemoveTx(wtx)
+	txmp.priorityIndex.RemoveTx(wtx)
+	txmp.heightIndex.Remove(wtx)
+	txmp.timestampIndex.Remove(wtx)
+
+	// Remove the transaction from the gossip index and clean up the linked-list
+	// element so it can be garbage collected.
+	txmp.gossipIndex.Remove(wtx.gossipEl)
+	wtx.gossipEl.DetachPrev()
+
+	atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))
+
+	if removeFromCache {
+		txmp.cache.Remove(wtx.tx)
+	}
+}
+
+// purgeExpiredTxs removes all transactions that have exceeded their respective
+// height- and/or time-based TTLs from their respective indexes. Every expired
+// transaction will be removed from the mempool, but preserved in the cache.
+//
+// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
+// the caller has a write-lock on the mempool and so we can safely iterate over
+// the height and time based indexes.
+func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
+	now := time.Now()
+	expiredTxs := make(map[types.TxKey]*WrappedTx)
+
+	if txmp.config.TTLNumBlocks > 0 {
+		purgeIdx := -1
+		for i, wtx := range txmp.heightIndex.txs {
+			if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
+				expiredTxs[wtx.tx.Key()] = wtx
+				purgeIdx = i
+			} else {
+				// since the index is sorted, we know no other txs can be purged
+				break
+			}
+		}
+
+		if purgeIdx >= 0 {
+			txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
+		}
+	}
+
+	if txmp.config.TTLDuration > 0 {
+		purgeIdx := -1
+		for i, wtx := range txmp.timestampIndex.txs {
+			if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
+				expiredTxs[wtx.tx.Key()] = wtx
+				purgeIdx = i
+			} else {
+				// since the index is sorted, we know no other txs can be purged
+				break
+			}
+		}
+
+		if purgeIdx >= 0 {
+			txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
+		}
+	}
+
+	for _, wtx := range expiredTxs {
+		txmp.removeTx(wtx, false)
+	}
+}
+
+func (txmp *TxMempool) notifyTxsAvailable() {
+	if txmp.Size() == 0 {
+		panic("attempt to notify txs available but mempool is empty!")
+	}
+
+	if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable {
+		// channel cap is 1, so this will send once
+		txmp.notifiedTxsAvailable = true
+
+		select {
+		case txmp.txsAvailable <- struct{}{}:
+		default:
+		}
 	}
 }
diff --git a/internal/mempool/v1/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go
similarity index 80%
rename from internal/mempool/v1/mempool_bench_test.go
rename to internal/mempool/mempool_bench_test.go
index ca23f1479..ed4740011 100644
--- a/internal/mempool/v1/mempool_bench_test.go
+++ b/internal/mempool/mempool_bench_test.go
@@ -1,4 +1,4 @@
-package v1
+package mempool
 
 import (
 	"context"
@@ -8,11 +8,13 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
-	"github.com/tendermint/tendermint/internal/mempool"
 )
 
 func BenchmarkTxMempool_CheckTx(b *testing.B) {
-	txmp := setup(b, 10000)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	txmp := setup(ctx, b, 10000)
 	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
 
 	b.ResetTimer()
@@ -27,6 +29,6 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) {
 		tx := []byte(fmt.Sprintf("%X=%d", prefix, priority))
 		b.StartTimer()
 
-		require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}))
+		require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, TxInfo{}))
 	}
 }
diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/mempool_test.go
similarity index 83%
rename from internal/mempool/v1/mempool_test.go
rename to internal/mempool/mempool_test.go
index df26be726..f06ee18d9 100644
--- a/internal/mempool/v1/mempool_test.go
+++ b/internal/mempool/mempool_test.go
@@ -1,4 +1,4 @@
-package v1
+package mempool
 
 import (
 	"bytes"
@@ -15,13 +15,13 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
+
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -72,30 +72,35 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
 	}
 }
 
-func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
+func setup(ctx
context.Context, t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + app := &application{kvstore.NewApplication()} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() - cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) - cfg.Mempool.CacheSize = cacheSize - - appConnMem, err := cc.NewABCIClient() + cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) - require.NoError(t, appConnMem.Start()) + cfg.Mempool.CacheSize = cacheSize + appConnMem, err := cc(logger) + require.NoError(t, err) + require.NoError(t, appConnMem.Start(ctx)) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) - require.NoError(t, appConnMem.Stop()) + cancel() + appConnMem.Wait() }) - return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) + return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) } func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { txs := make([]testTx, numTxs) - txInfo := mempool.TxInfo{SenderID: peerID} + txInfo := TxInfo{SenderID: peerID} rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -116,8 +121,21 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx return txs } +func convertTex(in []testTx) types.Txs { + out := make([]types.Tx, len(in)) + + for idx := range in { + out[idx] = in[idx].tx + } + + return out +} + func TestTxMempool_TxsAvailable(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txmp.EnableTxsAvailable() ensureNoTxFire := func() { @@ -171,7 +189,10 @@ func TestTxMempool_TxsAvailable(t *testing.T) { } func TestTxMempool_Size(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -195,7 +216,10 @@ func TestTxMempool_Size(t *testing.T) { } func TestTxMempool_Flush(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -220,15 +244,18 @@ func TestTxMempool_Flush(t *testing.T) { } func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - txMap := make(map[[mempool.TxKeySize]byte]testTx) + txMap := make(map[types.TxKey]testTx) priorities := make([]int64, len(tTxs)) for i, tTx := range tTxs { - txMap[mempool.TxKey(tTx.tx)] = tTx + txMap[tTx.tx.Key()] = tTx priorities[i] = tTx.priority } @@ -240,7 +267,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { ensurePrioritized := func(reapedTxs types.Txs) { reapedPriorities := make([]int64, len(reapedTxs)) for i, rTx := range reapedTxs { - reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + reapedPriorities[i] = txMap[rTx.Key()].priority } require.Equal(t, priorities[:len(reapedPriorities)], 
reapedPriorities) @@ -270,15 +297,18 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { } func TestTxMempool_ReapMaxTxs(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) tTxs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - txMap := make(map[[mempool.TxKeySize]byte]testTx) + txMap := make(map[types.TxKey]testTx) priorities := make([]int64, len(tTxs)) for i, tTx := range tTxs { - txMap[mempool.TxKey(tTx.tx)] = tTx + txMap[tTx.tx.Key()] = tTx priorities[i] = tTx.priority } @@ -290,7 +320,7 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { ensurePrioritized := func(reapedTxs types.Txs) { reapedPriorities := make([]int64, len(reapedTxs)) for i, rTx := range reapedTxs { - reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + reapedPriorities[i] = txMap[rTx.Key()].priority } require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) @@ -319,24 +349,30 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { } func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes+1) _, err := rng.Read(tx) require.NoError(t, err) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.Error(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: 0})) tx = make([]byte, txmp.config.MaxTxBytes-1) _, err = rng.Read(tx) require.NoError(t, err) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: 0})) } func TestTxMempool_CheckTxSamePeer(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -346,12 +382,15 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) { tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: peerID})) + require.Error(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: peerID})) } func TestTxMempool_CheckTxSameSender(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -366,14 +405,17 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50)) tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) - require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) } func 
TestTxMempool_ConcurrentTxs(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) rng := rand.New(rand.NewSource(time.Now().UnixNano())) checkTxDone := make(chan struct{}) @@ -437,7 +479,10 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { - txmp := setup(t, 500) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 500) txmp.height = 100 txmp.config.TTLNumBlocks = 10 @@ -487,6 +532,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } func TestTxMempool_CheckTxPostCheckError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { name string err error @@ -503,10 +551,13 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { for _, tc := range cases { testCase := tc t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { return testCase.err } - txmp := setup(t, 0, WithPostCheck(postCheckFn)) + txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn)) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes-1) _, err := rng.Read(tx) @@ -521,7 +572,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { } require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError) } - require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) }) } } diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 5749d2d3c..8344220a0 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -5,29 +5,30 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // Mempool is an empty implementation of a Mempool, useful for testing. 
type Mempool struct{} -var _ mempl.Mempool = Mempool{} +var _ Mempool = Mempool{} func (Mempool) Lock() {} func (Mempool) Unlock() {} func (Mempool) Size() int { return 0 } -func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } +func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (Mempool) Update( _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, ) error { return nil } diff --git a/internal/mempool/v1/priority_queue.go b/internal/mempool/priority_queue.go similarity index 99% rename from internal/mempool/v1/priority_queue.go rename to internal/mempool/priority_queue.go index df74a92d3..f59715abb 100644 --- a/internal/mempool/v1/priority_queue.go +++ b/internal/mempool/priority_queue.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "container/heap" diff --git a/internal/mempool/v1/priority_queue_test.go b/internal/mempool/priority_queue_test.go similarity index 99% rename from internal/mempool/v1/priority_queue_test.go rename to internal/mempool/priority_queue_test.go index c0048f388..ddc84806d 100644 --- a/internal/mempool/v1/priority_queue_test.go +++ b/internal/mempool/priority_queue_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "math/rand" diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/reactor.go similarity index 88% rename from internal/mempool/v1/reactor.go rename to internal/mempool/reactor.go index 3014e0519..2ddb44e7a 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/reactor.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -8,10 +8,9 @@ import ( "sync" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -37,9 +36,9 @@ type PeerManager interface { type Reactor struct { service.BaseService - config *cfg.MempoolConfig + cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.MempoolIDs + ids *IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. @@ -65,7 +64,7 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. 
func NewReactor( logger log.Logger, - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, peerMgr PeerManager, txmp *TxMempool, mempoolCh *p2p.Channel, @@ -73,10 +72,10 @@ func NewReactor( ) *Reactor { r := &Reactor{ - config: config, + cfg: cfg, peerMgr: peerMgr, mempool: txmp, - ids: mempool.NewMempoolIDs(), + ids: NewMempoolIDs(), mempoolCh: mempoolCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), @@ -90,32 +89,22 @@ func NewReactor( func defaultObservePanic(r interface{}) {} -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { + largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, + return &p2p.ChannelDescriptor{ + ID: MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, } } @@ -123,8 +112,8 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
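For orientation, a wiring sketch for the descriptor and constructor above; router.OpenChannel is an assumed call on the node's p2p router, and the remaining names are placeholders (reactor_test.go below shows concrete wiring):

```go
// Sketch only: logger, cfg, peerMgr, txmp, peerUpdates, router and ctx are
// assumed to exist in the surrounding node-setup code.
chDesc := GetChannelDescriptor(cfg.Mempool)
mempoolCh, err := router.OpenChannel(chDesc) // assumed router API
if err != nil {
	return err
}
r := NewReactor(logger, cfg.Mempool, peerMgr, txmp, mempoolCh, peerUpdates)
if err := r.Start(ctx); err != nil {
	return err
}
```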
-func (r *Reactor) OnStart() error { - if !r.config.Broadcast { +func (r *Reactor) OnStart(ctx context.Context) error { + if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } @@ -171,14 +160,14 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { return errors.New("empty txs received from peer") } - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + txInfo := TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} if len(envelope.From) != 0 { txInfo.SenderNodeID = envelope.From } for _, tx := range protoTxs { if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -208,7 +197,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err r.Logger.Debug("received message", "peer", envelope.From) switch chID { - case mempool.MempoolChannel: + case MempoolChannel: err = r.handleMempoolMessage(envelope) default: @@ -262,7 +251,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { return } - if r.config.Broadcast { + if r.cfg.Broadcast { // Check if we've already started a goroutine for this peer, if not we create // a new done channel so we can explicitly close the goroutine if the peer // is later removed, we increment the waitgroup so the reactor can stop @@ -368,7 +357,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) height := r.peerMgr.GetHeight(peerID) if height > 0 && height < memTx.height-1 { // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) continue } } @@ -386,7 +375,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } r.Logger.Debug( "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/reactor_test.go similarity index 55% rename from internal/mempool/v0/reactor_test.go rename to internal/mempool/reactor_test.go index 91729b37c..4456424b5 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -1,23 +1,23 @@ -package v0 +package mempool import ( "context" + "os" + "strings" "sync" "testing" "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/config" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -25,10 +25,10 @@ type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*Reactor - mempoolChnnels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*CListMempool - kvstores map[types.NodeID]*kvstore.Application + reactors 
map[types.NodeID]*Reactor + mempoolChannels map[types.NodeID]*p2p.Channel + mempools map[types.NodeID]*TxMempool + kvstores map[types.NodeID]*kvstore.Application peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates @@ -36,30 +36,31 @@ type reactorTestSuite struct { nodes []types.NodeID } -func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { +func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() + cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) + rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*CListMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + logger: log.TestingLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), + reactors: make(map[types.NodeID]*Reactor, numNodes), + mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempools: make(map[types.NodeID]*TxMempool, numNodes), + kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) + chDesc := GetChannelDescriptor(cfg.Mempool) + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(rts.kvstores[nodeID]) - mempool, memCleanup := newMempoolWithApp(cc) - t.Cleanup(memCleanup) - mempool.SetLogger(rts.logger) + mempool := setup(ctx, t, 0) rts.mempools[nodeID] = mempool rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) @@ -68,16 +69,16 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac rts.reactors[nodeID] = NewReactor( rts.logger.With("nodeID", nodeID), - cfg, + cfg.Mempool, rts.network.Nodes[nodeID].PeerManager, mempool, - rts.mempoolChnnels[nodeID], + rts.mempoolChannels[nodeID], rts.peerUpdates[nodeID], ) rts.nodes = append(rts.nodes, nodeID) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } @@ -107,87 +108,122 @@ func (rts *reactorTestSuite) start(t *testing.T) { func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { t.Helper() + rts.stop(t) + + for _, mch := range rts.mempoolChannels { + require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) + } +} + +func (rts *reactorTestSuite) stop(t *testing.T) { for id, r := range rts.reactors { require.NoError(t, r.Stop(), "stopping reactor %s", id) r.Wait() require.False(t, r.IsRunning(), "reactor %s did not stop", id) } - - for _, mch := range rts.mempoolChnnels { - require.Empty(t, mch.Out, "checking channel %q (len=%d)", 
mch.ID, len(mch.Out)) - } } -func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) { +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) { t.Helper() - fn := func(pool *CListMempool) { - for pool.Size() < len(txs) { - time.Sleep(50 * time.Millisecond) - } - - reapedTxs := pool.ReapMaxTxs(len(txs)) - require.Equal(t, len(txs), len(reapedTxs)) - for i, tx := range txs { - require.Equalf(t, - tx, - reapedTxs[i], - "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], - ) - } - } - - if len(ids) == 1 { - fn(rts.reactors[ids[0]].mempool) - return - } - + // ensure that the transactions get fully broadcast to the + // rest of the network wg := &sync.WaitGroup{} - for id := range rts.mempools { - if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) { + for name, pool := range rts.mempools { + if !p2ptest.NodeInSlice(name, ids) { continue } wg.Add(1) - func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) + go func(pool *TxMempool) { + defer wg.Done() + require.Eventually(t, func() bool { return len(txs) == pool.Size() }, + time.Minute, + 100*time.Millisecond, + ) + }(pool) + } + wg.Wait() +} + +func TestReactorBroadcastDoesNotPanic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numNodes = 2 + rts := setupReactors(ctx, t, numNodes, 0) + + observePanic := func(r interface{}) { + t.Fatal("panic detected in reactor") } + primary := rts.nodes[0] + secondary := rts.nodes[1] + primaryReactor := rts.reactors[primary] + primaryMempool := primaryReactor.mempool + secondaryReactor := rts.reactors[secondary] + + primaryReactor.observePanic = observePanic + secondaryReactor.observePanic = observePanic + + firstTx := &WrappedTx{} + primaryMempool.insertTx(firstTx) + + // run the router + rts.start(t) + + closer := tmsync.NewCloser() + primaryReactor.peerWG.Add(1) + go primaryReactor.broadcastTxRoutine(secondary, closer) + + wg := &sync.WaitGroup{} + for i := 0; i < 50; i++ { + next := &WrappedTx{} + wg.Add(1) + go func() { + defer wg.Done() + primaryMempool.insertTx(next) + }() + } + + err := primaryReactor.Stop() + require.NoError(t, err) + primaryReactor.peerWG.Wait() wg.Wait() } func TestReactorBroadcastTxs(t *testing.T) { numTxs := 1000 numNodes := 10 - config := cfg.TestConfig() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondaries := rts.nodes[1:] - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) // run the router rts.start(t) // Wait till all secondary suites (reactor) received all mempool txs from the // primary suite (node). - rts.waitForTxns(t, txs, secondaries...) + rts.waitForTxns(t, convertTex(txs), secondaries...) 
- for _, pool := range rts.mempools { - require.Equal(t, len(txs), pool.Size()) - } - - rts.assertMempoolChannelsDrained(t) + rts.stop(t) } // regression test for https://github.com/tendermint/tendermint/issues/5408 func TestReactorConcurrency(t *testing.T) { numTxs := 5 numNodes := 2 - config := cfg.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -202,7 +238,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update the whole mempool - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) go func() { defer wg.Done() @@ -216,12 +252,12 @@ func TestReactorConcurrency(t *testing.T) { deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} } - require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil)) + require.NoError(t, mempool.Update(1, convertTex(txs), deliverTxResponses, nil, nil)) }() // 1. submit a bunch of txs // 2. update none - _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) + _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID) go func() { defer wg.Done() @@ -244,9 +280,11 @@ func TestReactorConcurrency(t *testing.T) { func TestReactorNoBroadcastToSender(t *testing.T) { numTxs := 1000 numNodes := 2 - config := cfg.TestConfig() - rts := setup(t, config.Mempool, numNodes, uint(numTxs)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, uint(numTxs)) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -267,49 +305,50 @@ func TestReactorNoBroadcastToSender(t *testing.T) { func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] // Broadcast a tx, which has the max size and ensure it's received by the // second reactor. - tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes) + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) err := rts.reactors[primary].mempool.CheckTx( - context.Background(), + ctx, tx1, nil, - mempool.TxInfo{ - SenderID: mempool.UnknownPeerID, + TxInfo{ + SenderID: UnknownPeerID, }, ) require.NoError(t, err) rts.start(t) - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, []types.Tx{tx1}, secondary) - rts.reactors[primary].mempool.Flush() rts.reactors[secondary].mempool.Flush() // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) + err = rts.mempools[primary].CheckTx(ctx, tx2, nil, TxInfo{SenderID: UnknownPeerID}) require.Error(t, err) rts.assertMempoolChannelsDrained(t) } func TestDontExhaustMaxActiveIDs(t *testing.T) { - config := cfg.TestConfig() - // we're creating a single node network, but not starting the // network. 
- rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, 1, MaxActiveIDs+1) nodeID := rts.nodes[0] @@ -317,13 +356,13 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { require.NoError(t, err) // ensure the reactor does not panic (i.e. exhaust active IDs) - for i := 0; i < mempool.MaxActiveIDs+1; i++ { + for i := 0; i < MaxActiveIDs+1; i++ { rts.peerChans[nodeID] <- p2p.PeerUpdate{ Status: p2p.PeerStatusUp, NodeID: peerID, } - rts.mempoolChnnels[nodeID].Out <- p2p.Envelope{ + rts.mempoolChannels[nodeID].Out <- p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{}, @@ -334,7 +373,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { require.Eventually( t, func() bool { - for _, mch := range rts.mempoolChnnels { + for _, mch := range rts.mempoolChannels { if len(mch.Out) > 0 { return false } @@ -355,12 +394,12 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { } // 0 is already reserved for UnknownPeerID - ids := mempool.NewMempoolIDs() + ids := NewMempoolIDs() peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) - for i := 0; i < mempool.MaxActiveIDs-1; i++ { + for i := 0; i < MaxActiveIDs-1; i++ { ids.ReserveForPeer(peerID) } @@ -374,9 +413,10 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { t.Skip("skipping test in short mode") } - config := cfg.TestConfig() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setup(t, config.Mempool, 2, 0) + rts := setupReactors(ctx, t, 2, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -388,4 +428,10 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { Status: p2p.PeerStatusDown, NodeID: secondary, } + time.Sleep(500 * time.Millisecond) + + txs := checkTxs(t, rts.reactors[primary].mempool, 4, UnknownPeerID) + require.Equal(t, 4, len(txs)) + require.Equal(t, 4, rts.mempools[primary].Size()) + require.Equal(t, 0, rts.mempools[secondary].Size()) } diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index 860d3d3b4..af48c9ccc 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,24 +1,14 @@ package mempool import ( - "crypto/sha256" + "sort" + "time" + "github.com/tendermint/tendermint/internal/libs/clist" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) -// TxKeySize defines the size of the transaction's key used for indexing. -const TxKeySize = sha256.Size - -// TxKey is the fixed length array key used as an index. -func TxKey(tx types.Tx) [TxKeySize]byte { - return sha256.Sum256(tx) -} - -// TxHashFromBytes returns the hash of a transaction from raw bytes. -func TxHashFromBytes(tx []byte) []byte { - return types.Tx(tx).Hash() -} - // TxInfo are parameters that get passed when attempting to add a tx to the // mempool. type TxInfo struct { @@ -30,3 +20,274 @@ type TxInfo struct { // SenderNodeID is the actual types.NodeID of the sender. SenderNodeID types.NodeID } + +// WrappedTx defines a wrapper around a raw transaction with additional metadata +// that is used for indexing. 
+type WrappedTx struct {
+	// tx represents the raw binary transaction data
+	tx types.Tx
+
+	// hash defines the transaction hash and the primary key used in the mempool
+	hash types.TxKey
+
+	// height defines the height at which the transaction was validated
+	height int64
+
+	// gasWanted defines the amount of gas the transaction sender requires
+	gasWanted int64
+
+	// priority defines the transaction's priority as specified by the application
+	// in the ResponseCheckTx response.
+	priority int64
+
+	// sender defines the transaction's sender as specified by the application in
+	// the ResponseCheckTx response.
+	sender string
+
+	// timestamp is the time at which the node first received the transaction from
+	// a peer. It is used as a second dimension in prioritizing transactions when
+	// two transactions have the same priority.
+	timestamp time.Time
+
+	// peers records a mapping of all peers that sent a given transaction
+	peers map[uint16]struct{}
+
+	// heapIndex defines the index of the item in the heap
+	heapIndex int
+
+	// gossipEl references the linked-list element in the gossip index
+	gossipEl *clist.CElement
+
+	// removed marks the transaction as removed from the mempool. This is set
+	// during RemoveTx and is needed due to the fact that a given existing
+	// transaction in the mempool can be evicted when it is simultaneously having
+	// a reCheckTx callback executed.
+	removed bool
+}
+
+func (wtx *WrappedTx) Size() int {
+	return len(wtx.tx)
+}
+
+// TxStore implements a thread-safe mapping of valid transaction(s).
+//
+// NOTE:
+// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
+//   access is not allowed. Regardless, it is not expected for the mempool to
+//   need mutative access.
+type TxStore struct {
+	mtx       tmsync.RWMutex
+	hashTxs   map[types.TxKey]*WrappedTx // primary index
+	senderTxs map[string]*WrappedTx      // sender is defined by the ABCI application
+}
+
+func NewTxStore() *TxStore {
+	return &TxStore{
+		senderTxs: make(map[string]*WrappedTx),
+		hashTxs:   make(map[types.TxKey]*WrappedTx),
+	}
+}
+
+// Size returns the total number of transactions in the store.
+func (txs *TxStore) Size() int {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	return len(txs.hashTxs)
+}
+
+// GetAllTxs returns all the transactions currently in the store.
+func (txs *TxStore) GetAllTxs() []*WrappedTx {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	wTxs := make([]*WrappedTx, len(txs.hashTxs))
+	i := 0
+	for _, wtx := range txs.hashTxs {
+		wTxs[i] = wtx
+		i++
+	}
+
+	return wTxs
+}
+
+// GetTxBySender returns a *WrappedTx by the transaction's sender property
+// defined by the ABCI application.
+func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	return txs.senderTxs[sender]
+}
+
+// GetTxByHash returns a *WrappedTx by the transaction's hash.
+func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	return txs.hashTxs[hash]
+}
+
+// IsTxRemoved returns true if a transaction by hash is marked as removed and
+// false otherwise.
+func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	wtx, ok := txs.hashTxs[hash]
+	if ok {
+		return wtx.removed
+	}
+
+	return false
+}
+
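A short sketch of the TxStore API in use, mirroring tx_test.go further below; SetTx and RemoveTx are defined immediately after this point:

```go
// Sketch: exercising both TxStore indexes directly.
func txStoreExample() {
	store := NewTxStore()
	wtx := &WrappedTx{
		tx:        types.Tx("sender-0=test_tx=1"),
		sender:    "sender-0",
		priority:  1,
		timestamp: time.Now(),
	}
	wtx.hash = wtx.tx.Key()

	store.SetTx(wtx)
	_ = store.GetTxByHash(wtx.hash)     // -> wtx
	_ = store.GetTxBySender("sender-0") // -> wtx

	store.RemoveTx(wtx) // drops both indexes and marks wtx.removed
}
```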
+// SetTx stores a *WrappedTx by its hash. If the transaction also contains a
+// non-empty sender, we additionally store the transaction by the sender as
+// defined by the ABCI application.
+func (txs *TxStore) SetTx(wtx *WrappedTx) {
+	txs.mtx.Lock()
+	defer txs.mtx.Unlock()
+
+	if len(wtx.sender) > 0 {
+		txs.senderTxs[wtx.sender] = wtx
+	}
+
+	txs.hashTxs[wtx.tx.Key()] = wtx
+}
+
+// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
+// indexes of the transaction.
+func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
+	txs.mtx.Lock()
+	defer txs.mtx.Unlock()
+
+	if len(wtx.sender) > 0 {
+		delete(txs.senderTxs, wtx.sender)
+	}
+
+	delete(txs.hashTxs, wtx.tx.Key())
+	wtx.removed = true
+}
+
+// TxHasPeer returns true if a transaction by hash has a given peer ID and false
+// otherwise. If the transaction does not exist, false is returned.
+func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
+	txs.mtx.RLock()
+	defer txs.mtx.RUnlock()
+
+	wtx := txs.hashTxs[hash]
+	if wtx == nil {
+		return false
+	}
+
+	_, ok := wtx.peers[peerID]
+	return ok
+}
+
+// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
+// given peerID to the WrappedTx's set of peers that sent us this transaction.
+// We return true if we've already recorded the given peer for this transaction
+// and false otherwise. If the transaction does not exist by hash, we return
+// (nil, false).
+func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
+	txs.mtx.Lock()
+	defer txs.mtx.Unlock()
+
+	wtx := txs.hashTxs[hash]
+	if wtx == nil {
+		return nil, false
+	}
+
+	if wtx.peers == nil {
+		wtx.peers = make(map[uint16]struct{})
+	}
+
+	if _, ok := wtx.peers[peerID]; ok {
+		return wtx, true
+	}
+
+	wtx.peers[peerID] = struct{}{}
+	return wtx, false
+}
+
+// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
+// used to build generic transaction indexes in the mempool. It accepts a
+// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
+// references; less is used during Insert in order to determine sorted order. If
+// less returns true, a <= b.
+type WrappedTxList struct {
+	mtx  tmsync.RWMutex
+	txs  []*WrappedTx
+	less func(*WrappedTx, *WrappedTx) bool
+}
+
+func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
+	return &WrappedTxList{
+		txs:  make([]*WrappedTx, 0),
+		less: less,
+	}
+}
+
+// Size returns the number of WrappedTx objects in the list.
+func (wtl *WrappedTxList) Size() int {
+	wtl.mtx.RLock()
+	defer wtl.mtx.RUnlock()
+
+	return len(wtl.txs)
+}
+
+// Reset resets the list of transactions to an empty list.
+func (wtl *WrappedTxList) Reset() {
+	wtl.mtx.Lock()
+	defer wtl.mtx.Unlock()
+
+	wtl.txs = make([]*WrappedTx, 0)
+}
+
+// Insert inserts a WrappedTx reference into the sorted list based on the list's
+// comparator function.
+func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
+	wtl.mtx.Lock()
+	defer wtl.mtx.Unlock()
+
+	i := sort.Search(len(wtl.txs), func(i int) bool {
+		return wtl.less(wtl.txs[i], wtx)
+	})
+
+	if i == len(wtl.txs) {
+		// insert at the end
+		wtl.txs = append(wtl.txs, wtx)
+		return
+	}
+
+	// Make space for the inserted element by shifting values at the insertion
+	// index up one index.
+	//
+	// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
+	wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
+	wtl.txs[i] = wtx
+}
+
+// Remove attempts to remove a WrappedTx from the sorted list.
+func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + // Since the list is sorted, we evaluate all elements starting at i. Note, if + // the element does not exist, we may potentially evaluate the entire remainder + // of the list. However, a caller should not be expected to call Remove with a + // non-existing element. + for i < len(wtl.txs) { + if wtl.txs[i] == wtx { + wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) + return + } + + i++ + } +} diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/tx_test.go similarity index 94% rename from internal/mempool/v1/tx_test.go rename to internal/mempool/tx_test.go index c5d488669..b68246076 100644 --- a/internal/mempool/v1/tx_test.go +++ b/internal/mempool/tx_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "fmt" @@ -8,7 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/types" ) func TestTxStore_GetTxBySender(t *testing.T) { @@ -39,7 +39,7 @@ func TestTxStore_GetTxByHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.Nil(t, res) @@ -58,7 +58,7 @@ func TestTxStore_SetTx(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) res := txs.GetTxByHash(key) @@ -81,10 +81,10 @@ func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) - res, ok := txs.GetOrSetPeerByTxHash(mempool.TxKey([]byte("test_tx_2")), 15) + res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15) require.Nil(t, res) require.False(t, ok) @@ -110,7 +110,7 @@ func TestTxStore_RemoveTx(t *testing.T) { txs.SetTx(wtx) - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.NotNil(t, res) diff --git a/internal/mempool/types.go b/internal/mempool/types.go new file mode 100644 index 000000000..6e3955dc3 --- /dev/null +++ b/internal/mempool/types.go @@ -0,0 +1,143 @@ +package mempool + +import ( + "context" + "fmt" + "math" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + MempoolChannel = p2p.ChannelID(0x30) + + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 + + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) + UnknownPeerID uint16 = 0 + + MaxActiveIDs = math.MaxUint16 +) + +// Mempool defines the mempool interface. +// +// Updates to the mempool need to be synchronized with committing a block so +// applications can reset their transient state on Commit. +type Mempool interface { + // CheckTx executes a new transaction against the application to determine + // its validity and whether it should be added to the mempool. + CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error + + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. + RemoveTxByKey(txKey types.TxKey) error + + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes + // bytes total with the condition that the total gasWanted must be less than + // maxGas. 
+	//
+	// If both maxes are negative, there is no cap on the size of all returned
+	// transactions (~ all available transactions).
+	ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
+
+	// ReapMaxTxs reaps up to max transactions from the mempool. If max is
+	// negative, there is no cap on the size of all returned transactions
+	// (~ all available transactions).
+	ReapMaxTxs(max int) types.Txs
+
+	// Lock locks the mempool. The consensus must be able to hold the lock to
+	// safely update.
+	Lock()
+
+	// Unlock unlocks the mempool.
+	Unlock()
+
+	// Update informs the mempool that the given txs were committed and can be
+	// discarded.
+	//
+	// NOTE:
+	// 1. This should be called *after* block is committed by consensus.
+	// 2. Lock/Unlock must be managed by the caller.
+	Update(
+		blockHeight int64,
+		blockTxs types.Txs,
+		deliverTxResponses []*abci.ResponseDeliverTx,
+		newPreFn PreCheckFunc,
+		newPostFn PostCheckFunc,
+	) error
+
+	// FlushAppConn flushes the mempool connection to ensure async callback calls
+	// are done, e.g. from CheckTx.
+	//
+	// NOTE:
+	// 1. Lock/Unlock must be managed by the caller.
+	FlushAppConn() error
+
+	// Flush removes all transactions from the mempool and caches.
+	Flush()
+
+	// TxsAvailable returns a channel which fires once for every height, and only
+	// when transactions are available in the mempool.
+	//
+	// NOTE:
+	// 1. The returned channel may be nil if EnableTxsAvailable was not called.
+	TxsAvailable() <-chan struct{}
+
+	// EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
+	// trigger once every height when transactions are available.
+	EnableTxsAvailable()
+
+	// Size returns the number of transactions in the mempool.
+	Size() int
+
+	// SizeBytes returns the total size of all txs in the mempool.
+	SizeBytes() int64
+}
+
+// PreCheckFunc is an optional filter executed before CheckTx that rejects a
+// transaction if an error is returned. An example would be to ensure that a
+// transaction doesn't exceed the block size.
+type PreCheckFunc func(types.Tx) error
+
+// PostCheckFunc is an optional filter executed after CheckTx that rejects a
+// transaction if an error is returned. An example would be to ensure a
+// transaction doesn't require more gas than available for the block.
+type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
+
+// PreCheckMaxBytes checks that the size of the transaction is smaller than or
+// equal to the expected maxBytes.
+func PreCheckMaxBytes(maxBytes int64) PreCheckFunc {
+	return func(tx types.Tx) error {
+		txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx})
+
+		if txSize > maxBytes {
+			return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes)
+		}
+
+		return nil
+	}
+}
+
+// PostCheckMaxGas checks that the wanted gas is smaller than or equal to the
+// passed maxGas. Returns nil if maxGas is -1.
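PreCheckMaxBytes and PostCheckMaxGas are plain function constructors, so they compose freely with the Update signature above. A rough usage sketch (hypothetical snippet; it would only compile inside this repository, since internal packages are not importable from other modules), with PostCheckMaxGas's implementation following just below:

```go
package mempool_test

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/types"
)

func ExampleChecks() {
	pre := mempool.PreCheckMaxBytes(64)   // reject txs whose proto size exceeds 64 bytes
	post := mempool.PostCheckMaxGas(1000) // reject txs that want more than 1000 gas

	tx := types.Tx("k=v")
	if err := pre(tx); err != nil {
		fmt.Println("precheck rejected:", err)
		return
	}
	res := &abci.ResponseCheckTx{GasWanted: 500}
	if err := post(tx, res); err != nil {
		fmt.Println("postcheck rejected:", err)
		return
	}
	fmt.Println("tx passes both filters")
}
```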
+func PostCheckMaxGas(maxGas int64) PostCheckFunc { + return func(tx types.Tx, res *abci.ResponseCheckTx) error { + if maxGas == -1 { + return nil + } + if res.GasWanted < 0 { + return fmt.Errorf("gas wanted %d is negative", + res.GasWanted) + } + if res.GasWanted > maxGas { + return fmt.Errorf("gas wanted %d is greater than max gas %d", + res.GasWanted, maxGas) + } + + return nil + } +} diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go deleted file mode 100644 index 45123c9f6..000000000 --- a/internal/mempool/v0/bench_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package v0 - -import ( - "context" - "encoding/binary" - "sync/atomic" - "testing" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" -) - -func BenchmarkReap(b *testing.B) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - mp.config.Size = 100000 - - size := 10000 - for i := 0; i < size; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - mp.ReapMaxBytesMaxGas(100000000, 10000000) - } -} - -func BenchmarkCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 1000000 - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - b.StartTimer() - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkParallelCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 100000000 - - var txcnt uint64 - next := func() uint64 { - return atomic.AddUint64(&txcnt, 1) - 1 - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, next()) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkCheckDuplicateTx(b *testing.B) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 1000000 - - for i := 0; i < b.N; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err == nil { - b.Fatal("tx should be duplicate") - } - } -} diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go deleted file mode 100644 index fbb719231..000000000 --- a/internal/mempool/v0/cache_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package v0 - -import ( - "context" - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -func TestCacheAfterUpdate(t *testing.T) { 
- app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // reAddIndices & txsInCache can have elements > numTxsToCreate - // also assumes max index is 255 for convenience - // txs in cache also checks order of elements - tests := []struct { - numTxsToCreate int - updateIndices []int - reAddIndices []int - txsInCache []int - }{ - {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works - {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache - {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache - {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe - } - for tcIndex, tc := range tests { - for i := 0; i < tc.numTxsToCreate; i++ { - tx := types.Tx{byte(i)} - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - updateTxs := []types.Tx{} - for _, v := range tc.updateIndices { - tx := types.Tx{byte(v)} - updateTxs = append(updateTxs, tx) - } - err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - - for _, v := range tc.reAddIndices { - tx := types.Tx{byte(v)} - _ = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - } - - cache := mp.cache.(*mempool.LRUTxCache) - node := cache.GetList().Front() - counter := 0 - for node != nil { - require.NotEqual(t, len(tc.txsInCache), counter, - "cache larger than expected on testcase %d", tcIndex) - - nodeVal := node.Value.([sha256.Size]byte) - expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) - // Reference for reading the errors: - // >>> sha256('\x00').hexdigest() - // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' - // >>> sha256('\x01').hexdigest() - // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' - // >>> sha256('\x02').hexdigest() - // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - - require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) - counter++ - node = node.Next() - } - require.Equal(t, len(tc.txsInCache), counter, - "cache smaller than expected on testcase %d", tcIndex) - mp.Flush() - } -} diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go deleted file mode 100644 index 167fe0410..000000000 --- a/internal/mempool/v0/clist_mempool.go +++ /dev/null @@ -1,672 +0,0 @@ -package v0 - -import ( - "bytes" - "context" - "fmt" - "sync" - "sync/atomic" - - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -// CListMempool is an ordered in-memory pool for transactions before they are -// proposed in a consensus round. Transaction validity is checked using the -// CheckTx abci message before the transaction is added to the pool. The -// mempool uses a concurrent list structure for storing transactions that can -// be efficiently accessed by multiple concurrent readers. 
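The test above inspects the LRU cache's internal list directly: the most recently seen tx hashes sit at the front and duplicates are refused. A minimal sketch of such an LRU set using container/list (an illustration of the behavior only, not the actual LRUTxCache implementation):

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

// lruSet is a fixed-size set of tx hashes with LRU eviction, sketching
// the behavior the test above relies on (most recent at the front).
type lruSet struct {
	size int
	m    map[[sha256.Size]byte]*list.Element
	l    *list.List
}

func newLRUSet(size int) *lruSet {
	return &lruSet{size: size, m: make(map[[sha256.Size]byte]*list.Element), l: list.New()}
}

// Push returns false if tx was already present (and refreshes its position).
func (c *lruSet) Push(tx []byte) bool {
	key := sha256.Sum256(tx)
	if e, ok := c.m[key]; ok {
		c.l.MoveToFront(e)
		return false
	}
	if c.l.Len() >= c.size {
		oldest := c.l.Back()
		c.l.Remove(oldest)
		delete(c.m, oldest.Value.([sha256.Size]byte))
	}
	c.m[key] = c.l.PushFront(key)
	return true
}

func main() {
	c := newLRUSet(2)
	fmt.Println(c.Push([]byte{0x00})) // true: new
	fmt.Println(c.Push([]byte{0x00})) // false: duplicate
	fmt.Println(c.Push([]byte{0x01})) // true
	fmt.Println(c.Push([]byte{0x02})) // true, evicts 0x00
	fmt.Println(c.Push([]byte{0x00})) // true again after eviction
}
```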
-type CListMempool struct {
-	// Atomic integers
-	height   int64 // the last block Update()'d to
-	txsBytes int64 // total size of mempool, in bytes
-
-	// notify listeners (ie. consensus) when txs are available
-	notifiedTxsAvailable bool
-	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
-
-	config *cfg.MempoolConfig
-
-	// Exclusive mutex for Update method to prevent concurrent execution of
-	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
-	updateMtx tmsync.RWMutex
-	preCheck  mempool.PreCheckFunc
-	postCheck mempool.PostCheckFunc
-
-	txs          *clist.CList // concurrent linked-list of good txs
-	proxyAppConn proxy.AppConnMempool
-
-	// Track whether we're rechecking txs.
-	// These are not protected by a mutex and are expected to be mutated in
-	// serial (ie. by abci responses which are called in serial).
-	recheckCursor *clist.CElement // next expected response
-	recheckEnd    *clist.CElement // re-checking stops here
-
-	// Map for quick access to txs to record sender in CheckTx.
-	// txsMap: txKey -> CElement
-	txsMap sync.Map
-
-	// Keep a cache of already-seen txs.
-	// This reduces the pressure on the proxyApp.
-	cache mempool.TxCache
-
-	logger  log.Logger
-	metrics *mempool.Metrics
-}
-
-var _ mempool.Mempool = &CListMempool{}
-
-// CListMempoolOption sets an optional parameter on the mempool.
-type CListMempoolOption func(*CListMempool)
-
-// NewCListMempool returns a new mempool with the given configuration and
-// connection to an application.
-func NewCListMempool(
-	config *cfg.MempoolConfig,
-	proxyAppConn proxy.AppConnMempool,
-	height int64,
-	options ...CListMempoolOption,
-) *CListMempool {
-
-	mp := &CListMempool{
-		config:        config,
-		proxyAppConn:  proxyAppConn,
-		txs:           clist.New(),
-		height:        height,
-		recheckCursor: nil,
-		recheckEnd:    nil,
-		logger:        log.NewNopLogger(),
-		metrics:       mempool.NopMetrics(),
-	}
-
-	if config.CacheSize > 0 {
-		mp.cache = mempool.NewLRUTxCache(config.CacheSize)
-	} else {
-		mp.cache = mempool.NopTxCache{}
-	}
-
-	proxyAppConn.SetResponseCallback(mp.globalCb)
-
-	for _, option := range options {
-		option(mp)
-	}
-
-	return mp
-}
-
-// NOTE: not thread safe - should only be called once, on startup
-func (mem *CListMempool) EnableTxsAvailable() {
-	mem.txsAvailable = make(chan struct{}, 1)
-}
-
-// SetLogger sets the Logger.
-func (mem *CListMempool) SetLogger(l log.Logger) {
-	mem.logger = l
-}
-
-// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
-// false. This is run before CheckTx. Only applies to the first created block.
-// After that, Update overwrites the existing value.
-func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
-	return func(mem *CListMempool) { mem.preCheck = f }
-}
-
-// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
-// false. This is run after CheckTx. Only applies to the first created block.
-// After that, Update overwrites the existing value.
-func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
-	return func(mem *CListMempool) { mem.postCheck = f }
-}
-
-// WithMetrics sets the metrics.
-func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
-	return func(mem *CListMempool) { mem.metrics = metrics }
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) Lock() {
-	mem.updateMtx.Lock()
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) Unlock() {
-	mem.updateMtx.Unlock()
-}
-
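NewCListMempool above follows the functional-options pattern: variadic CListMempoolOption values mutate the mempool after defaults are set, which is what lets WithPreCheck, WithPostCheck, and WithMetrics stay optional. A generic sketch of the pattern (the server type and option names here are illustrative only, not mempool API):

```go
package main

import "fmt"

// server is a stand-in type for whatever is being constructed.
type server struct {
	name    string
	retries int
}

// option mirrors CListMempoolOption: a function that mutates the value
// under construction.
type option func(*server)

func withRetries(n int) option { return func(s *server) { s.retries = n } }

func newServer(name string, opts ...option) *server {
	s := &server{name: name, retries: 1} // defaults first
	for _, opt := range opts {
		opt(s) // each option overrides a default
	}
	return s
}

func main() {
	s := newServer("mempool-demo", withRetries(3))
	fmt.Printf("%+v\n", *s)
}
```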
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) Size() int {
-	return mem.txs.Len()
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) SizeBytes() int64 {
-	return atomic.LoadInt64(&mem.txsBytes)
-}
-
-// Lock() must be held by the caller during execution.
-func (mem *CListMempool) FlushAppConn() error {
-	return mem.proxyAppConn.FlushSync(context.Background())
-}
-
-// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
-func (mem *CListMempool) Flush() {
-	mem.updateMtx.RLock()
-	defer mem.updateMtx.RUnlock()
-
-	_ = atomic.SwapInt64(&mem.txsBytes, 0)
-	mem.cache.Reset()
-
-	for e := mem.txs.Front(); e != nil; e = e.Next() {
-		mem.txs.Remove(e)
-		e.DetachPrev()
-	}
-
-	mem.txsMap.Range(func(key, _ interface{}) bool {
-		mem.txsMap.Delete(key)
-		return true
-	})
-}
-
-// TxsFront returns the first transaction in the ordered list for peer
-// goroutines to call .NextWait() on.
-// FIXME: leaking implementation details!
-//
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) TxsFront() *clist.CElement {
-	return mem.txs.Front()
-}
-
-// TxsWaitChan returns a channel to wait on transactions. It will be closed
-// once the mempool is not empty (ie. the internal `mem.txs` has at least one
-// element)
-//
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
-	return mem.txs.WaitChan()
-}
-
-// It blocks if we're waiting on Update() or Reap().
-// cb: A callback from the CheckTx command.
-// It gets called from another goroutine.
-// CONTRACT: Either cb will get called, or err returned.
-//
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) CheckTx(
-	ctx context.Context,
-	tx types.Tx,
-	cb func(*abci.Response),
-	txInfo mempool.TxInfo,
-) error {
-
-	mem.updateMtx.RLock()
-	// use defer to unlock mutex because application (*local client*) might panic
-	defer mem.updateMtx.RUnlock()
-
-	txSize := len(tx)
-
-	if err := mem.isFull(txSize); err != nil {
-		return err
-	}
-
-	if txSize > mem.config.MaxTxBytes {
-		return types.ErrTxTooLarge{
-			Max:    mem.config.MaxTxBytes,
-			Actual: txSize,
-		}
-	}
-
-	if mem.preCheck != nil {
-		if err := mem.preCheck(tx); err != nil {
-			return types.ErrPreCheck{
-				Reason: err,
-			}
-		}
-	}
-
-	// NOTE: proxyAppConn may error if tx buffer is full
-	if err := mem.proxyAppConn.Error(); err != nil {
-		return err
-	}
-
-	if !mem.cache.Push(tx) { // if the transaction already exists in the cache
-		// Record a new sender for a tx we've already seen.
-		// Note it's possible a tx is still in the cache but no longer in the mempool
-		// (eg. after committing a block, txs are removed from mempool but not cache),
-		// so we only record the sender for txs still in the mempool.
-		if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
-			memTx := e.(*clist.CElement).Value.(*mempoolTx)
-			_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
-			// TODO: consider punishing peer for dups,
-			// it's non-trivial since invalid txs can become valid,
-			// but they can spam the same tx with little cost to them atm.
- if loaded { - return types.ErrTxInCache - } - } - - mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - mem.cache.Remove(tx) - return err - } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. -// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// -// When rechecking, we don't need the peerID, so the recheck callback happens -// here. -func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTx to record PeerID who sent us the tx. -func (mem *CListMempool) reqResCb( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - externalCb func(*abci.Response), -) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, peerID, peerP2PID, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *CListMempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(mempool.TxKey(memTx.tx), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(mempool.TxKey(tx)) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// RemoveTxByKey removes a transaction from the mempool by its TxKey index. 
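addTx and removeTx above pair a sync.Map (tx key to list element) with an atomically updated byte counter, so SizeBytes never needs the update lock. A stripped-down sketch of that bookkeeping (the store type here is hypothetical):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// store sketches the addTx/removeTx bookkeeping above: a sync.Map keyed
// by tx key plus an atomically updated running byte total.
type store struct {
	txs      sync.Map // key -> tx bytes
	txsBytes int64
}

func (s *store) add(key string, tx []byte) {
	s.txs.Store(key, tx)
	atomic.AddInt64(&s.txsBytes, int64(len(tx)))
}

func (s *store) remove(key string) {
	if v, ok := s.txs.LoadAndDelete(key); ok {
		atomic.AddInt64(&s.txsBytes, -int64(len(v.([]byte))))
	}
}

func (s *store) sizeBytes() int64 { return atomic.LoadInt64(&s.txsBytes) }

func main() {
	var s store
	s.add("a", []byte("hello"))
	s.add("b", []byte("world!!"))
	fmt.Println(s.sizeBytes()) // 12
	s.remove("a")
	fmt.Println(s.sizeBytes()) // 7
}
```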
-func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) { - if e, ok := mem.txsMap.Load(txKey); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache) - } - } -} - -func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() - ) - - if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: memSize, - MaxTxs: mem.config.Size, - TxsBytes: txsBytes, - MaxTxsBytes: mem.config.MaxTxsBytes, - } - } - - return nil -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *CListMempool) resCbFirstTime( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - res *abci.Response, -) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Check mempool isn't full again to reduce the chance of exceeding the - // limits. - if err := mem.isFull(len(tx)); err != nil { - // remove from cache (mempool might have a space later) - mem.cache.Remove(tx) - mem.logger.Error(err.Error()) - return - } - - memTx := &mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(peerID, true) - mem.addTx(memTx) - mem.logger.Debug( - "added good transaction", - "tx", mempool.TxHashFromBytes(tx), - "res", r, - "height", memTx.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Debug( - "rejected bad transaction", - "tx", mempool.TxHashFromBytes(tx), - "peerID", peerP2PID, - "res", r, - "err", postCheckErr, - ) - mem.metrics.FailedTxs.Add(1) - - if !mem.config.KeepInvalidTxsInCache { - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - } - - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) - } - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! 
-		mem.logger.Debug("done rechecking txs")
-
-		// in case the recheck removed all txs
-		if mem.Size() > 0 {
-			mem.notifyTxsAvailable()
-		}
-	}
-	default:
-		// ignore other messages
-	}
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) TxsAvailable() <-chan struct{} {
-	return mem.txsAvailable
-}
-
-func (mem *CListMempool) notifyTxsAvailable() {
-	if mem.Size() == 0 {
-		panic("notified txs available but mempool is empty!")
-	}
-	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
-		// channel cap is 1, so this will send once
-		mem.notifiedTxsAvailable = true
-		select {
-		case mem.txsAvailable <- struct{}{}:
-		default:
-		}
-	}
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
-	mem.updateMtx.RLock()
-	defer mem.updateMtx.RUnlock()
-
-	var (
-		totalGas    int64
-		runningSize int64
-	)
-
-	// TODO: we will get a performance boost if we have a good estimate of avg
-	// size per tx, and set the initial capacity based off of that.
-	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
-	txs := make([]types.Tx, 0, mem.txs.Len())
-	for e := mem.txs.Front(); e != nil; e = e.Next() {
-		memTx := e.Value.(*mempoolTx)
-
-		txs = append(txs, memTx.tx)
-
-		dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})
-
-		// Check total size requirement
-		if maxBytes > -1 && runningSize+dataSize > maxBytes {
-			return txs[:len(txs)-1]
-		}
-
-		runningSize += dataSize
-
-		// Check total gas requirement.
-		// If maxGas is negative, skip this check.
-		// Since newTotalGas < maxGas, which
-		// must be non-negative, it follows that this won't overflow.
-		newTotalGas := totalGas + memTx.gasWanted
-		if maxGas > -1 && newTotalGas > maxGas {
-			return txs[:len(txs)-1]
-		}
-		totalGas = newTotalGas
-	}
-	return txs
-}
-
-// Safe for concurrent use by multiple goroutines.
-func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
-	mem.updateMtx.RLock()
-	defer mem.updateMtx.RUnlock()
-
-	if max < 0 {
-		max = mem.txs.Len()
-	}
-
-	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
-	for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
-		memTx := e.Value.(*mempoolTx)
-		txs = append(txs, memTx.tx)
-	}
-	return txs
-}
-
-// Lock() must be held by the caller during execution.
-func (mem *CListMempool) Update(
-	height int64,
-	txs types.Txs,
-	deliverTxResponses []*abci.ResponseDeliverTx,
-	preCheck mempool.PreCheckFunc,
-	postCheck mempool.PostCheckFunc,
-) error {
-	// Set height
-	mem.height = height
-	mem.notifiedTxsAvailable = false
-
-	if preCheck != nil {
-		mem.preCheck = preCheck
-	}
-	if postCheck != nil {
-		mem.postCheck = postCheck
-	}
-
-	for i, tx := range txs {
-		if deliverTxResponses[i].Code == abci.CodeTypeOK {
-			// Add valid committed tx to the cache (if missing).
-			_ = mem.cache.Push(tx)
-		} else if !mem.config.KeepInvalidTxsInCache {
-			// Allow invalid transactions to be resubmitted.
-			mem.cache.Remove(tx)
-		}
-
-		// Remove committed tx from the mempool.
-		//
-		// Note an evil proposer can drop valid txs!
-		// Mempool before:
-		//   100 -> 101 -> 102
-		// Block, proposed by an evil proposer:
-		//   101 -> 102
-		// Mempool after:
-		//   100
-		// https://github.com/tendermint/tendermint/issues/3322.
-		if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
-			mem.removeTx(tx, e.(*clist.CElement), false)
-		}
-	}
-
-	// Either recheck non-committed txs to see if they became invalid
-	// or just notify there are some txs left.
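The branch that follows this comment either triggers a recheck or notifies listeners; the other half of the story is the ReapMaxBytesMaxGas loop earlier in this hunk, which caps a block's transactions by both bytes and gas. A self-contained sketch of that capping logic (simplified: it checks the limits before appending rather than truncating afterwards, and it uses raw lengths instead of proto-encoded sizes):

```go
package main

import "fmt"

type memTx struct {
	tx        []byte
	gasWanted int64
}

// reap mirrors the ReapMaxBytesMaxGas loop above: accumulate txs in order
// until adding one more would exceed either cap; a negative cap disables
// that check.
func reap(pool []memTx, maxBytes, maxGas int64) [][]byte {
	var out [][]byte
	var size, gas int64
	for _, mt := range pool {
		dataSize := int64(len(mt.tx)) // the real code uses the proto-encoded size
		if maxBytes > -1 && size+dataSize > maxBytes {
			break
		}
		if maxGas > -1 && gas+mt.gasWanted > maxGas {
			break
		}
		size += dataSize
		gas += mt.gasWanted
		out = append(out, mt.tx)
	}
	return out
}

func main() {
	pool := []memTx{
		{[]byte("aaaa"), 1},
		{[]byte("bbbb"), 1},
		{[]byte("cccc"), 1},
	}
	fmt.Println(len(reap(pool, 8, -1)))  // 2: byte cap hit
	fmt.Println(len(reap(pool, -1, 2)))  // 2: gas cap hit
	fmt.Println(len(reap(pool, -1, -1))) // 3: no caps
}
```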
- if mem.Size() > 0 { - if mem.config.Recheck { - mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height) - mem.recheckTxs() - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. - } else { - mem.notifyTxsAvailable() - } - } - - // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - return nil -} - -func (mem *CListMempool) recheckTxs() { - if mem.Size() == 0 { - panic("recheckTxs is called, but the mempool is empty") - } - - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() - - ctx := context.Background() - - // Push txs to proxyAppConn - // NOTE: globalCb may be called concurrently. - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - _, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: memTx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // No need in retrying since memTx will be rechecked after next block. - mem.logger.Error("Can't check tx", "err", err) - } - } - - _, err := mem.proxyAppConn.FlushAsync(ctx) - if err != nil { - mem.logger.Error("Can't flush txs", "err", err) - } -} - -//-------------------------------------------------------------------------------- - -// mempoolTx is a transaction that successfully ran -type mempoolTx struct { - height int64 // height that this tx had been validated in - gasWanted int64 // amount of gas this tx states it will require - tx types.Tx // - - // ids of peers who've sent us this tx (as a map for quick lookups). - // senders: PeerID -> bool - senders sync.Map -} - -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) -} diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go deleted file mode 100644 index 6f3e6ebc0..000000000 --- a/internal/mempool/v0/clist_mempool_test.go +++ /dev/null @@ -1,620 +0,0 @@ -package v0 - -import ( - "context" - "crypto/rand" - "encoding/binary" - "fmt" - mrand "math/rand" - "os" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - abciserver "github.com/tendermint/tendermint/abci/server" - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -// A cleanupFunc cleans up any config / test files created for a particular -// test. 
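recheckTxs above replays every tx through CheckTxAsync and lets the recheck cursor walk the linked list as responses arrive, dropping entries that are no longer valid. A single-goroutine sketch of that cursor walk over container/list (the real flow is asynchronous and driven by ABCI callbacks):

```go
package main

import (
	"container/list"
	"fmt"
)

// recheck walks the pool between cursor and end, removing entries the
// validity check now rejects, a simplified stand-in for the
// recheckCursor/recheckEnd bookkeeping above.
func recheck(pool *list.List, stillValid func(int) bool) {
	cursor, end := pool.Front(), pool.Back()
	for cursor != nil {
		next := cursor.Next() // grab before a possible Remove
		if v := cursor.Value.(int); !stillValid(v) {
			pool.Remove(cursor)
		}
		if cursor == end {
			break
		}
		cursor = next
	}
}

func main() {
	pool := list.New()
	for i := 1; i <= 5; i++ {
		pool.PushBack(i)
	}
	recheck(pool, func(v int) bool { return v%2 == 1 }) // evens became invalid
	for e := pool.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value) // 1, 3, 5
	}
}
```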
-type cleanupFunc func() - -func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { - return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) -} - -func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc.NewABCIClient() - appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() - if err != nil { - panic(err) - } - - mp := NewCListMempool(config.Mempool, appConnMem, 0) - mp.SetLogger(log.TestingLogger()) - - return mp, func() { os.RemoveAll(config.RootDir) } -} - -func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - t.Fatal("Expected not to fire") - case <-timer.C: - } -} - -func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - case <-timer.C: - t.Fatal("Expected to fire") - } -} - -func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { - txs := make(types.Txs, count) - txInfo := mempool.TxInfo{SenderID: peerID} - for i := 0; i < count; i++ { - txBytes := make([]byte, 20) - txs[i] = txBytes - _, err := rand.Read(txBytes) - if err != nil { - t.Error(err) - } - if err := mp.CheckTx(context.Background(), txBytes, nil, txInfo); err != nil { - // Skip invalid txs. - // TestMempoolFilters will fail otherwise. It asserts a number of txs - // returned. - if types.IsPreCheckError(err) { - continue - } - t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) - } - } - return txs -} - -func TestReapMaxBytesMaxGas(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // Ensure gas calculation behaves as expected - checkTxs(t, mp, 1, mempool.UnknownPeerID) - tx0 := mp.TxsFront().Value.(*mempoolTx) - // assert that kv store has gas wanted = 1. - require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") - require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") - // ensure each tx is 20 bytes long - require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") - mp.Flush() - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
- // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - maxBytes int64 - maxGas int64 - expectedNumTxs int - }{ - {20, -1, -1, 20}, - {20, -1, 0, 0}, - {20, -1, 10, 10}, - {20, -1, 30, 20}, - {20, 0, -1, 0}, - {20, 0, 10, 0}, - {20, 10, 10, 0}, - {20, 24, 10, 1}, - {20, 240, 5, 5}, - {20, 240, -1, 10}, - {20, 240, 10, 10}, - {20, 240, 15, 10}, - {20, 20000, -1, 20}, - {20, 20000, 5, 5}, - {20, 20000, 30, 20}, - } - for tcIndex, tt := range tests { - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) - assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", - len(got), tt.expectedNumTxs, tcIndex) - mp.Flush() - } -} - -func TestMempoolFilters(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - emptyTxArr := []types.Tx{[]byte{}} - - nopPreFilter := func(tx types.Tx) error { return nil } - nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil } - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - preFilter mempool.PreCheckFunc - postFilter mempool.PostCheckFunc - expectedNumTxs int - }{ - {10, nopPreFilter, nopPostFilter, 10}, - {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, - {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, - {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, - {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, - } - for tcIndex, tt := range tests { - err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) - require.NoError(t, err) - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) - mp.Flush() - } -} - -func TestMempoolUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // 1. Adds valid txs to the cache - { - err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. Removes valid txs from the mempool - { - err := mp.CheckTx(context.Background(), []byte{0x02}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - } - - // 3. 
Removes invalid transactions from the cache and the mempool (if present) - { - err := mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - - err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestMempool_KeepInvalidTxsInCache(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - wcfg := cfg.DefaultConfig() - wcfg.Mempool.KeepInvalidTxsInCache = true - mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) - defer cleanup() - - // 1. An invalid transaction must remain in the cache after Update - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, 1) - - err := mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // simulate new block - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: a}) - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: b}) - err = mp.Update(1, []types.Tx{a, b}, - []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil) - require.NoError(t, err) - - // a must be added to the cache - err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // b must remain in the cache - err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. An invalid transaction must remain in the cache - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - // remove a from the cache to test (2) - mp.cache.Remove(a) - - err := mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestTxsAvailable(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - mp.EnableTxsAvailable() - - timeoutMS := 500 - - // with no txs, it shouldnt fire - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch of txs, it should only fire once - txs := checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // call update with half the txs. - // it should fire once now for the new height - // since there are still txs left - committedTxs, txs := txs[:50], txs[50:] - if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // now call update with all the txs. it should not fire as there are no txs left - committedTxs = append(txs, moreTxs...) 
//nolint: gocritic - if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs, it should only fire once - checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) -} - -func TestSerialReap(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - appConnCon, _ := cc.NewABCIClient() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err := appConnCon.Start() - require.Nil(t, err) - - cacheMap := make(map[string]struct{}) - deliverTxsRange := func(start, end int) { - // Deliver some txs. - for i := start; i < end; i++ { - - // This will succeed - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - _, cached := cacheMap[string(txBytes)] - if cached { - require.NotNil(t, err, "expected error for cached tx") - } else { - require.Nil(t, err, "expected no err for uncached tx") - } - cacheMap[string(txBytes)] = struct{}{} - - // Duplicates are cached and should return error - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") - } - } - - reapCheck := func(exp int) { - txs := mp.ReapMaxBytesMaxGas(-1, -1) - require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) - } - - updateRange := func(start, end int) { - txs := make([]types.Tx, 0) - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - txs = append(txs, txBytes) - } - if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - } - - commitRange := func(start, end int) { - ctx := context.Background() - // Deliver some txs. - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - if err != nil { - t.Errorf("client error committing tx: %v", err) - } - if res.IsErr() { - t.Errorf("error committing tx. Code:%v result:%X log:%v", - res.Code, res.Data, res.Log) - } - } - res, err := appConnCon.CommitSync(ctx) - if err != nil { - t.Errorf("client error committing: %v", err) - } - if len(res.Data) != 8 { - t.Errorf("error committing. Hash:%X", res.Data) - } - } - - //---------------------------------------- - - // Deliver some txs. - deliverTxsRange(0, 100) - - // Reap the txs. - reapCheck(100) - - // Reap again. We should get the same amount - reapCheck(100) - - // Deliver 0 to 999, we should reap 900 new txs - // because 100 were already counted. - deliverTxsRange(0, 1000) - - // Reap the txs. - reapCheck(1000) - - // Reap again. We should get the same amount - reapCheck(1000) - - // Commit from the conensus AppConn - commitRange(0, 500) - updateRange(0, 500) - - // We should have 500 left. - reapCheck(500) - - // Deliver 100 invalid txs and 100 valid txs - deliverTxsRange(900, 1100) - - // We should have 600 now. 
- reapCheck(600) -} - -func TestMempool_CheckTxChecksTxSize(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mempl, cleanup := newMempoolWithApp(cc) - defer cleanup() - - maxTxSize := mempl.config.MaxTxBytes - - testCases := []struct { - len int - err bool - }{ - // check small txs. no error - 0: {10, false}, - 1: {1000, false}, - 2: {1000000, false}, - - // check around maxTxSize - 3: {maxTxSize - 1, false}, - 4: {maxTxSize, false}, - 5: {maxTxSize + 1, true}, - } - - for i, testCase := range testCases { - caseString := fmt.Sprintf("case %d, len %d", i, testCase.len) - - tx := tmrand.Bytes(testCase.len) - - err := mempl.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - bv := gogotypes.BytesValue{Value: tx} - bz, err2 := bv.Marshal() - require.NoError(t, err2) - require.Equal(t, len(bz), proto.Size(&bv), caseString) - - if !testCase.err { - require.NoError(t, err, caseString) - } else { - require.Equal(t, err, types.ErrTxTooLarge{ - Max: maxTxSize, - Actual: testCase.len, - }, caseString) - } - } -} - -func TestMempoolTxsBytes(t *testing.T) { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - config := cfg.ResetTestRoot("mempool_test") - config.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, config) - defer cleanup() - - // 1. zero by default - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 2. len(tx) after CheckTx - err := mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 1, mp.SizeBytes()) - - // 3. zero again after tx is removed by Update - err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 4. zero after Flush - err = mp.CheckTx(context.Background(), []byte{0x02, 0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 2, mp.SizeBytes()) - - mp.Flush() - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. - err = mp.CheckTx( - context.Background(), - []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, - nil, - mempool.TxInfo{}, - ) - require.NoError(t, err) - - err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{}) - if assert.Error(t, err) { - assert.IsType(t, types.ErrMempoolIsFull{}, err) - } - - // 6. zero after tx is rechecked and removed due to not being valid anymore - app2 := kvstore.NewApplication() - cc = proxy.NewLocalClientCreator(app2) - mp, cleanup = newMempoolWithApp(cc) - defer cleanup() - - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(0)) - - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - appConnCon, _ := cc.NewABCIClient() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := appConnCon.Stop(); err != nil { - t.Error(err) - } - }) - ctx := context.Background() - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - require.NoError(t, err) - require.EqualValues(t, 0, res.Code) - res2, err := appConnCon.CommitSync(ctx) - require.NoError(t, err) - require.NotEmpty(t, res2.Data) - - // Pretend like we committed nothing so txBytes gets rechecked and removed. 
- err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - // 7. Test RemoveTxByKey function - err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true) - assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true) - assert.EqualValues(t, 8, mp.SizeBytes()) - -} - -// This will non-deterministically catch some concurrency failures like -// https://github.com/tendermint/tendermint/issues/3509 -// TODO: all of the tests should probably also run using the remote proxy app -// since otherwise we're not actually testing the concurrency of the mempool here! -func TestMempoolRemoteAppConcurrency(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - app := kvstore.NewApplication() - cc, server := newRemoteApp(t, sockPath, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - config := cfg.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(cc, config) - defer cleanup() - - // generate small number of txs - nTxs := 10 - txLen := 200 - txs := make([]types.Tx, nTxs) - for i := 0; i < nTxs; i++ { - txs[i] = tmrand.Bytes(txLen) - } - - // simulate a group of peers sending them over and over - N := config.Mempool.Size - maxPeers := 5 - for i := 0; i < N; i++ { - peerID := mrand.Intn(maxPeers) - txNum := mrand.Intn(nTxs) - tx := txs[txNum] - - // this will err with ErrTxInCache many times ... - mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error - } - err := mp.FlushAppConn() - require.NoError(t, err) -} - -// caller must close server -func newRemoteApp( - t *testing.T, - addr string, - app abci.Application, -) ( - clientCreator proxy.ClientCreator, - server service.Service, -) { - clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) - - // Start server - server = abciserver.NewSocketServer(addr, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - return clientCreator, server -} - -func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { - responses := make([]*abci.ResponseDeliverTx, 0, n) - for i := 0; i < n; i++ { - responses = append(responses, &abci.ResponseDeliverTx{Code: code}) - } - return responses -} diff --git a/internal/mempool/v0/doc.go b/internal/mempool/v0/doc.go deleted file mode 100644 index 3b5d0d20d..000000000 --- a/internal/mempool/v0/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// The mempool pushes new txs onto the proxyAppConn. -// It gets a stream of (req, res) tuples from the proxy. -// The mempool stores good txs in a concurrent linked-list. - -// Multiple concurrent go-routines can traverse this linked-list -// safely by calling .NextWait() on each element. - -// So we have several go-routines: -// 1. Consensus calling Update() and ReapMaxBytesMaxGas() synchronously -// 2. Many mempool reactor's peer routines calling CheckTx() -// 3. Many mempool reactor's peer routines traversing the txs linked list - -// To manage these goroutines, there are three methods of locking. -// 1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) -// 2. 
Mutations to the linked-list elements are atomic
-// 3. CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx
-
-// Garbage collection of old elements from mempool.txs is handled via the
-// DetachPrev() call, which makes old elements not reachable by peer
-// broadcastTxRoutine().
-
-// TODO: Better handle abci client errors. (make it automatically handle connection errors)
-package v0
diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go
deleted file mode 100644
index 29dec5833..000000000
--- a/internal/mempool/v0/reactor.go
+++ /dev/null
@@ -1,402 +0,0 @@
-package v0
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"runtime/debug"
-	"sync"
-	"time"
-
-	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/libs/clist"
-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	"github.com/tendermint/tendermint/internal/mempool"
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/libs/service"
-	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
-	"github.com/tendermint/tendermint/types"
-)
-
-var (
-	_ service.Service = (*Reactor)(nil)
-	_ p2p.Wrapper     = (*protomem.Message)(nil)
-)
-
-// PeerManager defines the interface contract required for getting necessary
-// peer information. This should eventually be replaced with a message-oriented
-// approach utilizing the p2p stack.
-type PeerManager interface {
-	GetHeight(types.NodeID) int64
-}
-
-// Reactor implements a service that contains a mempool of txs that are broadcast
-// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping
-// txs to the peers you received them from.
-type Reactor struct {
-	service.BaseService
-
-	config  *cfg.MempoolConfig
-	mempool *CListMempool
-	ids     *mempool.MempoolIDs
-
-	// XXX: Currently, this is the only way to get information about a peer. Ideally,
-	// we rely on message-oriented communication to get necessary peer data.
-	// ref: https://github.com/tendermint/tendermint/issues/5670
-	peerMgr PeerManager
-
-	mempoolCh   *p2p.Channel
-	peerUpdates *p2p.PeerUpdates
-	closeCh     chan struct{}
-
-	// peerWG is used to coordinate graceful termination of all peer broadcasting
-	// goroutines.
-	peerWG sync.WaitGroup
-
-	mtx          tmsync.Mutex
-	peerRoutines map[types.NodeID]*tmsync.Closer
-}
-
-// NewReactor returns a reference to a new reactor.
-func NewReactor(
-	logger log.Logger,
-	config *cfg.MempoolConfig,
-	peerMgr PeerManager,
-	mp *CListMempool,
-	mempoolCh *p2p.Channel,
-	peerUpdates *p2p.PeerUpdates,
-) *Reactor {
-
-	r := &Reactor{
-		config:       config,
-		peerMgr:      peerMgr,
-		mempool:      mp,
-		ids:          mempool.NewMempoolIDs(),
-		mempoolCh:    mempoolCh,
-		peerUpdates:  peerUpdates,
-		closeCh:      make(chan struct{}),
-		peerRoutines: make(map[types.NodeID]*tmsync.Closer),
-	}
-
-	r.BaseService = *service.NewBaseService(logger, "Mempool", r)
-	return r
-}
-
-// GetChannelShims returns a map of ChannelDescriptorShim objects, where each
-// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
-// p2p proto.Message the new p2p Channel is responsible for handling.
-//
-//
-// TODO: Remove once p2p refactor is complete.
-// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) - batchMsg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, - }, - } - - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, - } -} - -// OnStart starts separate go routines for each p2p Channel and listens for -// envelopes on each. In addition, it also listens for peer updates and handles -// messages on that p2p channel accordingly. The caller must be sure to execute -// OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { - if !r.config.Broadcast { - r.Logger.Info("tx broadcasting is disabled") - } - - go r.processMempoolCh() - go r.processPeerUpdates() - - return nil -} - -// OnStop stops the reactor by signaling to all spawned goroutines to exit and -// blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // wait for all spawned peer tx broadcasting goroutines to gracefully exit - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() -} - -// handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. -// For every tx in the message, we execute CheckTx. It returns an error if an -// empty set of txs are sent in an envelope or if we receive an unexpected -// message type. -func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) - - switch msg := envelope.Message.(type) { - case *protomem.Txs: - protoTxs := msg.GetTxs() - if len(protoTxs) == 0 { - return errors.New("empty txs received from peer") - } - - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} - if len(envelope.From) != 0 { - txInfo.SenderNodeID = envelope.From - } - - for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) - } - } - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. -// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel. 
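handleMessage, which follows, wraps each handler call in a defer/recover so a malformed envelope can never crash the reactor; the panic is converted into an ordinary error for the caller. The same shape in isolation (safeHandle is a hypothetical name):

```go
package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

// safeHandle converts a panic inside a message handler into an ordinary
// error, the same defer/recover pattern used by handleMessage below.
func safeHandle(handler func() error) (err error) {
	defer func() {
		if e := recover(); e != nil {
			// The reactor logs the stack alongside the error; here we
			// fold it into the error message instead.
			err = fmt.Errorf("panic in processing message: %v\n%s", e, debug.Stack())
		}
	}()
	return handler()
}

func main() {
	err := safeHandle(func() error { panic("malformed envelope") })
	fmt.Println(err != nil) // true: panic became an error
	err = safeHandle(func() error { return errors.New("plain failure") })
	fmt.Println(err) // plain failure
}
```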
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) - } - }() - - r.Logger.Debug("received message", "peer", envelope.From) - - switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) - - default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) - } - - return err -} - -// processMempoolCh implements a blocking event loop where we listen for p2p -// Envelope messages from the mempoolCh. -func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return - } - } -} - -// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we -// check if the reactor is running and if we've already started a tx broadcasting -// goroutine or not. If not, we start one for the newly added peer. For down or -// removed peers, we remove the peer from the mempool peer ID set and signal to -// stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) - - r.mtx.Lock() - defer r.mtx.Unlock() - - switch peerUpdate.Status { - case p2p.PeerStatusUp: - // Do not allow starting new tx broadcast loops after reactor shutdown - // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. - if !r.IsRunning() { - return - } - - if r.config.Broadcast { - // Check if we've already started a goroutine for this peer, if not we create - // a new done channel so we can explicitly close the goroutine if the peer - // is later removed, we increment the waitgroup so the reactor can stop - // safely, and finally start the goroutine to broadcast txs to that peer. - _, ok := r.peerRoutines[peerUpdate.NodeID] - if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) - - r.ids.ReserveForPeer(peerUpdate.NodeID) - - // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) - } - } - - case p2p.PeerStatusDown: - r.ids.Reclaim(peerUpdate.NodeID) - - // Check if we've started a tx broadcasting goroutine for this peer. - // If we have, we signal to terminate the goroutine via the channel's closure. - // This will internally decrement the peer waitgroup and remove the peer - // from the map of peer tx broadcasting goroutines. - closer, ok := r.peerRoutines[peerUpdate.NodeID] - if ok { - closer.Close() - } - } -} - -// processPeerUpdates initiates a blocking process where we listen for and handle -// PeerUpdate messages. When the reactor is stopped, we will catch the signal and -// close the p2p PeerUpdatesCh gracefully. 
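processPeerUpdate above starts at most one broadcast goroutine per peer, keyed in peerRoutines, with a per-peer closer to stop it and a WaitGroup so OnStop can block until every routine exits. A minimal sketch of that lifecycle (peerSet and its methods are illustrative only):

```go
package main

import (
	"fmt"
	"sync"
)

// peerSet sketches the peerRoutines bookkeeping above: one goroutine per
// peer, a per-peer close channel to stop it, and a WaitGroup so shutdown
// can block until every routine has exited.
type peerSet struct {
	mtx      sync.Mutex
	routines map[string]chan struct{}
	wg       sync.WaitGroup
}

func (p *peerSet) up(id string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if _, ok := p.routines[id]; ok {
		return // already running, mirroring the peerRoutines check
	}
	closer := make(chan struct{})
	p.routines[id] = closer
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		<-closer // stand-in for the broadcast loop's select
		fmt.Println("stopped routine for", id)
	}()
}

func (p *peerSet) down(id string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if closer, ok := p.routines[id]; ok {
		close(closer)
		delete(p.routines, id)
	}
}

func main() {
	ps := &peerSet{routines: make(map[string]chan struct{})}
	ps.up("peer-a")
	ps.up("peer-b")
	ps.down("peer-a")
	ps.down("peer-b")
	ps.wg.Wait() // graceful shutdown: all per-peer goroutines have exited
}
```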
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - - for { - select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return - } - } -} - -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { - peerMempoolID := r.ids.GetForPeer(peerID) - var next *clist.CElement - - // remove the peer ID from the map of routines and mark the waitgroup as done - defer func() { - r.mtx.Lock() - delete(r.peerRoutines, peerID) - r.mtx.Unlock() - - r.peerWG.Done() - - if e := recover(); e != nil { - r.Logger.Error( - "recovering from broadcasting mempool loop", - "err", e, - "stack", string(debug.Stack()), - ) - } - }() - - for { - if !r.IsRunning() { - return - } - - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. - if next == nil { - select { - case <-r.mempool.TxsWaitChan(): // wait until a tx is available - if next = r.mempool.TxsFront(); next == nil { - continue - } - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } - - memTx := next.Value.(*mempoolTx) - - if r.peerMgr != nil { - height := r.peerMgr.GetHeight(peerID) - if height > 0 && height < memTx.Height()-1 { - // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - - // NOTE: Transaction batching was disabled due to: - // https://github.com/tendermint/tendermint/issues/5796 - - if _, ok := memTx.senders.Load(peerMempoolID); !ok { - // Send the mempool tx to the corresponding peer. Note, the peer may be - // behind and thus would not be able to process the mempool tx correctly. - r.mempoolCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{memTx.tx}, - }, - } - r.Logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), - "peer", peerID, - ) - } - - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
- return - } - } -} diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go deleted file mode 100644 index 46e7f5fcc..000000000 --- a/internal/mempool/v1/mempool.go +++ /dev/null @@ -1,842 +0,0 @@ -package v1 - -import ( - "bytes" - "context" - "fmt" - "sync/atomic" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -var _ mempool.Mempool = (*TxMempool)(nil) - -// TxMempoolOption sets an optional parameter on the TxMempool. -type TxMempoolOption func(*TxMempool) - -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. -type TxMempool struct { - logger log.Logger - metrics *mempool.Metrics - config *config.MempoolConfig - proxyAppConn proxy.AppConnMempool - - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache mempool.TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. - gossipIndex *clist.CList - - // recheckCursor and recheckEnd are used as cursors based on the gossip index - // to recheck transactions that are already in the mempool. Iteration is not - // thread-safe and transaction may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safe guard updates, insertions and deletions - // from the mempool. 
A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc -} - -func NewTxMempool( - logger log.Logger, - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...TxMempoolOption, -) *TxMempool { - - txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: height, - cache: mempool.NopTxCache{}, - metrics: mempool.NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), - } - - if cfg.CacheSize > 0 { - txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } - - proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) - - for _, opt := range options { - opt(txmp) - } - - return txmp -} - -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. It only applies -// to the first created block. After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.postCheck = f } -} - -// WithMetrics sets the mempool's metrics collector. -func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { - return func(txmp *TxMempool) { txmp.metrics = metrics } -} - -// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly -// release the lock when finished. -func (txmp *TxMempool) Lock() { - txmp.mtx.Lock() -} - -// Unlock releases a write-lock on the mempool. -func (txmp *TxMempool) Unlock() { - txmp.mtx.Unlock() -} - -// Size returns the number of valid transactions in the mempool. It is -// thread-safe. -func (txmp *TxMempool) Size() int { - return txmp.txStore.Size() -} - -// SizeBytes return the total sum in bytes of all the valid transactions in the -// mempool. It is thread-safe. -func (txmp *TxMempool) SizeBytes() int64 { - return atomic.LoadInt64(&txmp.sizeBytes) -} - -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. -// -// NOTE: The caller must obtain a write-lock via Lock() prior to execution. -func (txmp *TxMempool) FlushAppConn() error { - return txmp.proxyAppConn.FlushSync(context.Background()) -} - -// WaitForNextTx returns a blocking channel that will be closed when the next -// valid transaction is available to gossip. It is thread-safe. -func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { - return txmp.gossipIndex.WaitChan() -} - -// NextGossipTx returns the next valid transaction to gossip. A caller must wait -// for WaitForNextTx to signal a transaction is available to gossip first. It is -// thread-safe. 
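NewTxMempool and the With* helpers follow Go's functional options pattern: the constructor applies defaults first, then lets each option mutate the partially built value. A compact illustration with hypothetical Pool/Option names:

```go
package main

import "fmt"

type Pool struct {
	cacheSize int
	preCheck  func([]byte) error
}

type Option func(*Pool)

// WithCacheSize overrides the default cache size.
func WithCacheSize(n int) Option { return func(p *Pool) { p.cacheSize = n } }

// WithPre sets a validation hook run before admission.
func WithPre(f func([]byte) error) Option { return func(p *Pool) { p.preCheck = f } }

func New(opts ...Option) *Pool {
	p := &Pool{cacheSize: 10000} // defaults first
	for _, opt := range opts {
		opt(p) // each option mutates the partially built value
	}
	return p
}

func main() {
	p := New(WithCacheSize(256), WithPre(func(tx []byte) error { return nil }))
	fmt.Println(p.cacheSize) // 256
}
```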
-func (txmp *TxMempool) NextGossipTx() *clist.CElement { - return txmp.gossipIndex.Front() -} - -// EnableTxsAvailable enables the mempool to trigger events when transactions -// are available on a block by block basis. -func (txmp *TxMempool) EnableTxsAvailable() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() - - txmp.txsAvailable = make(chan struct{}, 1) -} - -// TxsAvailable returns a channel which fires once for every height, and only -// when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { - return txmp.txsAvailable -} - -// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires -// a read-lock and attempts to execute the application's CheckTx ABCI method via -// CheckTxAsync. We return an error if any of the following happen: -// -// - The CheckTxAsync execution fails. -// - The transaction already exists in the cache and we've already received the -// transaction from the peer. Otherwise, if it solely exists in the cache, we -// return nil. -// - The transaction size exceeds the maximum transaction size as defined by the -// configuration provided to the mempool. -// - The transaction fails Pre-Check (if it is defined). -// - The proxyAppConn fails, e.g. the buffer is full. -// -// If the mempool is full, we still execute CheckTx and attempt to find a lower -// priority transaction to evict. If such a transaction exists, we remove the -// lower priority transaction and add the new one with higher priority. -// -// NOTE: -// - The application's CheckTx implementation may panic. -// - The caller is not required to hold any locks to execute CheckTx. -func (txmp *TxMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txSize := len(tx) - if txSize > txmp.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: txmp.config.MaxTxBytes, - Actual: txSize, - } - } - - if txmp.preCheck != nil { - if err := txmp.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - if err := txmp.proxyAppConn.Error(); err != nil { - return err - } - - txHash := mempool.TxKey(tx) - - // We add the transaction to the mempool's cache and if the transaction already - // exists, i.e. false is returned, then we check if we've seen this transaction - // from the same sender and error if we have. Otherwise, we return nil. - if !txmp.cache.Push(tx) { - wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) - if wtx != nil && ok { - // We already have the transaction stored and we've already seen this - // transaction from txInfo.SenderID. - return types.ErrTxInCache - } - - txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - txmp.cache.Remove(tx) - return err - } - - reqRes.SetCallback(func(res *abci.Response) { - if txmp.recheckCursor != nil { - panic("recheck cursor is non-nil in CheckTx callback") - } - - wtx := &WrappedTx{ - tx: tx, - hash: txHash, - timestamp: time.Now().UTC(), - height: txmp.height, - } - txmp.initTxCallback(wtx, res, txInfo) - - if cb != nil { - cb(res) - } - }) - - return nil -} - -// Flush flushes out the mempool.
It acquires a read-lock, fetches all the -// transactions currently in the transaction store and removes each transaction -// from the store and all indexes and finally resets the cache. -// -// NOTE: -// - Flushing the mempool may leave the mempool in an inconsistent state. -func (txmp *TxMempool) Flush() { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txmp.heightIndex.Reset() - txmp.timestampIndex.Reset() - - for _, wtx := range txmp.txStore.GetAllTxs() { - txmp.removeTx(wtx, false) - } - - atomic.SwapInt64(&txmp.sizeBytes, 0) - txmp.cache.Reset() -} - -// ReapMaxBytesMaxGas returns a list of transactions within the provided size -// and gas constraints. Transactions are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - var ( - totalGas int64 - totalSize int64 - ) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) - for txmp.priorityIndex.NumTxs() > 0 { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) - - // Ensure we have capacity for the transaction with respect to the - // transaction size. - if maxBytes > -1 && totalSize+size > maxBytes { - return txs[:len(txs)-1] - } - - totalSize += size - - // ensure we have capacity for the transaction with respect to total gas - gas := totalGas + wtx.gasWanted - if maxGas > -1 && gas > maxGas { - return txs[:len(txs)-1] - } - - totalGas = gas - } - - return txs -} - -// ReapMaxTxs returns a list of transactions within the provided bound on the -// number of transactions. Transactions are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - numTxs := txmp.priorityIndex.NumTxs() - if max < 0 { - max = numTxs - } - - cap := tmmath.MinInt(numTxs, max) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, cap) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, cap) - for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - } - - return txs -} - -// Update iterates over all the transactions provided by the caller, i.e. the -// block producer, and removes them from the cache (if applicable) and removes -// the transactions from the main transaction store and associated indexes. -// Finally, if there are transactions remaining in the mempool, we initiate a -// re-CheckTx for them (if applicable), otherwise, we notify the caller more -// transactions are available. -// -// NOTE: -// - The caller must explicitly acquire a write-lock via Lock().
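Both reap methods can only visit transactions in priority order by popping the heap, so they collect what they pop and re-push it all in a deferred loop, leaving the queue intact. A reduced sketch of that pop-then-restore technique using container/heap (item and reap are illustrative, not the mempool's types):

```go
package main

import (
	"container/heap"
	"fmt"
)

type item struct{ priority, size int64 }

type pq []*item

func (q pq) Len() int            { return len(q) }
func (q pq) Less(i, j int) bool  { return q[i].priority > q[j].priority } // max-heap
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *pq) Push(x interface{}) { *q = append(*q, x.(*item)) }
func (q *pq) Pop() interface{} {
	old := *q
	it := old[len(old)-1]
	*q = old[:len(old)-1]
	return it
}

// reap pops items in priority order until the byte budget is exceeded,
// then restores everything it popped so the queue is left intact.
func reap(q *pq, maxBytes int64) []*item {
	popped := []*item{}
	defer func() {
		for _, it := range popped {
			heap.Push(q, it)
		}
	}()

	var out []*item
	var total int64
	for q.Len() > 0 {
		it := heap.Pop(q).(*item)
		popped = append(popped, it)
		if total+it.size > maxBytes {
			break // budget exhausted; this item stays out of the result
		}
		total += it.size
		out = append(out, it)
	}
	return out
}

func main() {
	q := &pq{{3, 10}, {1, 10}, {2, 10}}
	heap.Init(q)
	fmt.Println(len(reap(q, 15)), q.Len()) // 1 3
}
```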
-func (txmp *TxMempool) Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn mempool.PreCheckFunc, - newPostFn mempool.PostCheckFunc, -) error { - - txmp.height = blockHeight - txmp.notifiedTxsAvailable = false - - if newPreFn != nil { - txmp.preCheck = newPreFn - } - if newPostFn != nil { - txmp.postCheck = newPostFn - } - - for i, tx := range blockTxs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // add the valid committed transaction to the cache (if missing) - _ = txmp.cache.Push(tx) - } else if !txmp.config.KeepInvalidTxsInCache { - // allow invalid transactions to be re-submitted - txmp.cache.Remove(tx) - } - - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(mempool.TxKey(tx)); wtx != nil { - txmp.removeTx(wtx, false) - } - } - - txmp.purgeExpiredTxs(blockHeight) - - // If there are any uncommitted transactions left in the mempool, we either - // initiate re-CheckTx per remaining transaction or notify that remaining - // transactions are left. - if txmp.Size() > 0 { - if txmp.config.Recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs() - } else { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - return nil -} - -// initTxCallback performs the initial, i.e. the first, callback after CheckTx -// has been executed by the ABCI application. In other words, initTxCallback is -// called after executing CheckTx when we see a unique transaction for the first -// time. CheckTx can be called again for the same transaction at a later point -// in time when re-checking, however, this callback will not be called. -// -// After the ABCI application executes CheckTx, initTxCallback is called with -// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool, -// we execute that first. If there is no error from postCheck (if defined) and -// the ABCI CheckTx response code is OK, we attempt to insert the transaction. -// -// When attempting to insert the transaction, we first check if there is -// sufficient capacity. If there is sufficient capacity, the transaction is -// inserted into the txStore and indexed across all indexes. Otherwise, if the -// mempool is full, we attempt to find a lower priority transaction to evict in -// place of the new incoming transaction. If no such transaction exists, the -// new incoming transaction is rejected. -// -// If the new incoming transaction fails CheckTx or postCheck fails, we reject -// the new incoming transaction. -// -// NOTE: -// - An explicit lock is NOT required.
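Stripped of locking and metrics, Update applies a simple cache policy per committed transaction: keep valid txs cached so duplicates stay cheap to reject, evict invalid ones so they can be resubmitted, and drop every committed tx from the pool. A toy version of that bookkeeping (plain maps stand in for the real store and LRU cache):

```go
package main

import "fmt"

// applyBlock mirrors the Update policy above: valid committed txs stay in the
// seen-cache, invalid ones are evicted so they can be resubmitted, and every
// committed tx leaves the pool itself.
func applyBlock(pool, cache map[string]bool, txs []string, codes []uint32) {
	for i, tx := range txs {
		if codes[i] == 0 { // 0 is CodeTypeOK in ABCI
			cache[tx] = true
		} else {
			delete(cache, tx) // allow resubmission of failed txs
		}
		delete(pool, tx)
	}
}

func main() {
	pool := map[string]bool{"a": true, "b": true, "c": true}
	cache := map[string]bool{"a": true, "b": true}
	applyBlock(pool, cache, []string{"a", "b"}, []uint32{0, 1})
	fmt.Println(len(pool), cache["a"], cache["b"]) // 1 true false
}
```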
-func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) { - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if !ok { - return - } - - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) - } - - if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { - // ignore bad transactions - txmp.logger.Info( - "rejected bad transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer_id", txInfo.SenderNodeID, - "code", checkTxRes.CheckTx.Code, - "post_check_err", err, - ) - - txmp.metrics.FailedTxs.Add(1) - - if !txmp.config.KeepInvalidTxsInCache { - txmp.cache.Remove(wtx.tx) - } - if err != nil { - checkTxRes.CheckTx.MempoolError = err.Error() - } - return - } - - sender := checkTxRes.CheckTx.Sender - priority := checkTxRes.CheckTx.Priority - - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "sender", sender, - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - } - - if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache. - txmp.cache.Remove(wtx.tx) - txmp.logger.Error( - "rejected incoming good transaction; mempool full", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err.Error(), - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. - for _, toEvict := range evictTxs { - txmp.removeTx(toEvict, true) - txmp.logger.Debug( - "evicted existing good transaction; mempool full", - "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), - "old_priority", toEvict.priority, - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "new_priority", wtx.priority, - ) - txmp.metrics.EvictedTxs.Add(1) - } - } - - wtx.gasWanted = checkTxRes.CheckTx.GasWanted - wtx.priority = priority - wtx.sender = sender - wtx.peers = map[uint16]struct{}{ - txInfo.SenderID: {}, - } - - txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) - txmp.metrics.Size.Set(float64(txmp.Size())) - - txmp.insertTx(wtx) - txmp.logger.Debug( - "inserted good transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "height", txmp.height, - "num_txs", txmp.Size(), - ) - txmp.notifyTxsAvailable() - -} - -// defaultTxCallback performs the default CheckTx application callback. This is -// NOT executed when a transaction is first seen/received. Instead, this callback -// is executed during re-checking transactions (if enabled). A caller, i.e a -// block proposer, acquires a mempool write-lock via Lock() and when executing -// Update(), if the mempool is non-empty and Recheck is enabled, then all -// remaining transactions will be rechecked via CheckTxAsync. The order in which -// they are rechecked must be the same order in which this callback is called -// per transaction. 
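When the pool is full, initTxCallback asks the priority index for lower-priority transactions whose combined size would free enough room for the newcomer. One way to sketch that selection over a slice sorted by ascending priority (evictable is hypothetical, not the actual GetEvictableTxs):

```go
package main

import "fmt"

type tx struct {
	priority int64
	size     int64
}

// evictable walks candidates in ascending priority order and returns the
// smallest prefix of strictly lower-priority txs that frees enough space
// for the incoming tx; nil means the incoming tx cannot be admitted.
func evictable(sorted []tx, incoming tx, used, capBytes int64) []tx {
	free := capBytes - used
	var out []tx
	for _, c := range sorted {
		if c.priority >= incoming.priority {
			break // only strictly lower-priority txs may be evicted
		}
		out = append(out, c)
		free += c.size
		if free >= incoming.size {
			return out
		}
	}
	return nil
}

func main() {
	pool := []tx{{priority: 1, size: 300}, {priority: 2, size: 300}}
	fmt.Println(evictable(pool, tx{priority: 5, size: 500}, 600, 600))
	// [{1 300} {2 300}]
}
```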
-func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { - if txmp.recheckCursor == nil { - return - } - - txmp.metrics.RecheckTimes.Add(1) - - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if ok { - tx := req.GetCheckTx().Tx - wtx := txmp.recheckCursor.Value.(*WrappedTx) - if !bytes.Equal(tx, wtx.tx) { - panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), mempool.TxKey(tx))) - } - - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, checkTxRes.CheckTx) - } - - if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { - wtx.priority = checkTxRes.CheckTx.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(wtx.tx)), - "err", err, - "code", checkTxRes.CheckTx.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } - } - - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() - } - - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - } -} - -// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For -// each transaction, it executes CheckTxAsync. The global callback defined on -// the proxyAppConn will be executed for each transaction after CheckTx is -// executed. -// -// NOTE: -// - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs() { - if txmp.Size() == 0 { - panic("attempted to update re-CheckTx txs when mempool is empty") - } - - txmp.recheckCursor = txmp.gossipIndex.Front() - txmp.recheckEnd = txmp.gossipIndex.Back() - ctx := context.Background() - - for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { - wtx := e.Value.(*WrappedTx) - - // Only execute CheckTx if the transaction is not marked as removed which - // could happen if the transaction was evicted. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: wtx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // no need in retrying since the tx will be rechecked after the next block - txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) - } - } - } - - if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil { - txmp.logger.Error("failed to flush transactions during rechecking", "err", err) - } -} - -// canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. Otherwise, nil is returned -// and the transaction can be inserted into the mempool. 
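Rechecking depends on CheckTx responses arriving in the same order the requests were issued, which is why a single cursor pair over the gossip list suffices to match responses to transactions. A stripped-down cursor sketch over container/list (illustrative only):

```go
package main

import (
	"container/list"
	"fmt"
)

type rechecker struct {
	cursor, end *list.Element
}

// begin snapshots the current list bounds before issuing recheck requests.
func (r *rechecker) begin(l *list.List) {
	r.cursor, r.end = l.Front(), l.Back()
}

// onResponse consumes one in-order response and advances the cursor,
// returning the value that response corresponds to.
func (r *rechecker) onResponse() (interface{}, bool) {
	if r.cursor == nil {
		return nil, false // not currently rechecking
	}
	v := r.cursor.Value
	if r.cursor == r.end {
		r.cursor = nil // finished rechecking
	} else {
		r.cursor = r.cursor.Next()
	}
	return v, true
}

func main() {
	l := list.New()
	for _, tx := range []string{"a", "b", "c"} {
		l.PushBack(tx)
	}
	var r rechecker
	r.begin(l)
	for v, ok := r.onResponse(); ok; v, ok = r.onResponse() {
		fmt.Println(v) // a, b, c: insertion order
	}
}
```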
-func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { - var ( - numTxs = txmp.Size() - sizeBytes = txmp.SizeBytes() - ) - - if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: numTxs, - MaxTxs: txmp.config.Size, - TxsBytes: sizeBytes, - MaxTxsBytes: txmp.config.MaxTxsBytes, - } - } - - return nil -} - -func (txmp *TxMempool) insertTx(wtx *WrappedTx) { - txmp.txStore.SetTx(wtx) - txmp.priorityIndex.PushTx(wtx) - txmp.heightIndex.Insert(wtx) - txmp.timestampIndex.Insert(wtx) - - // Insert the transaction into the gossip index and mark the reference to the - // linked-list element, which will be needed at a later point when the - // transaction is removed. - gossipEl := txmp.gossipIndex.PushBack(wtx) - wtx.gossipEl = gossipEl - - atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) -} - -func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { - if txmp.txStore.IsTxRemoved(wtx.hash) { - return - } - - txmp.txStore.RemoveTx(wtx) - txmp.priorityIndex.RemoveTx(wtx) - txmp.heightIndex.Remove(wtx) - txmp.timestampIndex.Remove(wtx) - - // Remove the transaction from the gossip index and cleanup the linked-list - // element so it can be garbage collected. - txmp.gossipIndex.Remove(wtx.gossipEl) - wtx.gossipEl.DetachPrev() - - atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) - - if removeFromCache { - txmp.cache.Remove(wtx.tx) - } -} - -// purgeExpiredTxs removes all transactions that have exceeded their respective -// height and/or time based TTLs from their respective indexes. Every expired -// transaction will be removed from the mempool entirely, except for the cache. -// -// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which -// the caller has a write-lock on the mempool and so we can safely iterate over -// the height and time based indexes. 
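insertTx and removeTx must touch every index plus the atomic byte counter together, or the structures drift out of sync. A reduced sketch of the same discipline with one map, one list, and an atomic counter:

```go
package main

import (
	"container/list"
	"fmt"
	"sync/atomic"
)

type store struct {
	sizeBytes int64 // updated atomically; mirrors the sum of stored tx sizes
	byKey     map[string]*list.Element
	gossip    *list.List
}

func newStore() *store {
	return &store{byKey: make(map[string]*list.Element), gossip: list.New()}
}

// insert adds tx to every index and bumps the byte counter.
func (s *store) insert(key, tx string) {
	el := s.gossip.PushBack(tx)
	s.byKey[key] = el
	atomic.AddInt64(&s.sizeBytes, int64(len(tx)))
}

// remove deletes tx from every index and decrements the byte counter,
// so the indexes can never disagree about membership.
func (s *store) remove(key string) {
	el, ok := s.byKey[key]
	if !ok {
		return
	}
	tx := el.Value.(string)
	s.gossip.Remove(el)
	delete(s.byKey, key)
	atomic.AddInt64(&s.sizeBytes, -int64(len(tx)))
}

func main() {
	s := newStore()
	s.insert("k1", "hello")
	s.insert("k2", "world!")
	s.remove("k1")
	fmt.Println(s.gossip.Len(), atomic.LoadInt64(&s.sizeBytes)) // 1 6
}
```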
-func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { - now := time.Now() - expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx) - - if txmp.config.TTLNumBlocks > 0 { - purgeIdx := -1 - for i, wtx := range txmp.heightIndex.txs { - if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[mempool.TxKey(wtx.tx)] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be purged - break - } - } - - if purgeIdx >= 0 { - txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] - } - } - - if txmp.config.TTLDuration > 0 { - purgeIdx := -1 - for i, wtx := range txmp.timestampIndex.txs { - if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[mempool.TxKey(wtx.tx)] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be purged - break - } - } - - if purgeIdx >= 0 { - txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] - } - } - - for _, wtx := range expiredTxs { - txmp.removeTx(wtx, false) - } -} - -func (txmp *TxMempool) notifyTxsAvailable() { - if txmp.Size() == 0 { - panic("attempt to notify txs available but mempool is empty!") - } - - if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - // channel cap is 1, so this will send once - txmp.notifiedTxsAvailable = true - - select { - case txmp.txsAvailable <- struct{}{}: - default: - } - } -} diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go deleted file mode 100644 index 5934d534c..000000000 --- a/internal/mempool/v1/reactor_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package v1 - -import ( - "os" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*TxMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) - t.Cleanup(func() { - os.RemoveAll(cfg.RootDir) - }) - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*TxMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} - rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) - - for nodeID := range
rts.network.Nodes { - rts.kvstores[nodeID] = kvstore.NewApplication() - - mempool := setup(t, 0) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - cfg.Mempool, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChannels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) - - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} - -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func TestReactorBroadcastDoesNotPanic(t *testing.T) { - numNodes := 2 - rts := setupReactors(t, numNodes, 0) - - observePanic := func(r interface{}) { - t.Fatal("panic detected in reactor") - } - - primary := rts.nodes[0] - secondary := rts.nodes[1] - primaryReactor := rts.reactors[primary] - primaryMempool := primaryReactor.mempool - secondaryReactor := rts.reactors[secondary] - - primaryReactor.observePanic = observePanic - secondaryReactor.observePanic = observePanic - - firstTx := &WrappedTx{} - primaryMempool.insertTx(firstTx) - - // run the router - rts.start(t) - - closer := tmsync.NewCloser() - primaryReactor.peerWG.Add(1) - go primaryReactor.broadcastTxRoutine(secondary, closer) - - wg := &sync.WaitGroup{} - for i := 0; i < 50; i++ { - next := &WrappedTx{} - wg.Add(1) - go func() { - defer wg.Done() - primaryMempool.insertTx(next) - }() - } - - err := primaryReactor.Stop() - require.NoError(t, err) - primaryReactor.peerWG.Wait() - wg.Wait() -} diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go deleted file mode 100644 index 15173b91f..000000000 --- a/internal/mempool/v1/tx.go +++ /dev/null @@ -1,282 +0,0 @@ -package v1 - -import ( - "sort" - "time" - - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -// WrappedTx defines a wrapper around a raw transaction with additional metadata -// that is used for indexing. -type WrappedTx struct { - // tx represents the raw binary transaction data - tx types.Tx - - // hash defines the transaction hash and the primary key used in the mempool - hash [mempool.TxKeySize]byte - - // height defines the height at which the transaction was validated at - height int64 - - // gasWanted defines the amount of gas the transaction sender requires - gasWanted int64 - - // priority defines the transaction's priority as specified by the application - // in the ResponseCheckTx response. - priority int64 - - // sender defines the transaction's sender as specified by the application in - // the ResponseCheckTx response. - sender string - - // timestamp is the time at which the node first received the transaction from - // a peer. 
It is used as a second dimension in prioritizing transactions when - two transactions have the same priority. - timestamp time.Time - - // peers records a mapping of all peers that sent a given transaction - peers map[uint16]struct{} - - // heapIndex defines the index of the item in the heap - heapIndex int - - // gossipEl references the linked-list element in the gossip index - gossipEl *clist.CElement - - // removed marks the transaction as removed from the mempool. This is set - // during RemoveTx and is needed due to the fact that a given existing - // transaction in the mempool can be evicted when it is simultaneously having - // a reCheckTx callback executed. - removed bool -} - -func (wtx *WrappedTx) Size() int { - return len(wtx.tx) -} - -// TxStore implements a thread-safe mapping of valid transaction(s). -// -// NOTE: -// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative -// access is not allowed. Regardless, it is not expected for the mempool to -// need mutative access. -type TxStore struct { - mtx tmsync.RWMutex - hashTxs map[[mempool.TxKeySize]byte]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application -} - -func NewTxStore() *TxStore { - return &TxStore{ - senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[[mempool.TxKeySize]byte]*WrappedTx), - } -} - -// Size returns the total number of transactions in the store. -func (txs *TxStore) Size() int { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return len(txs.hashTxs) -} - -// GetAllTxs returns all the transactions currently in the store. -func (txs *TxStore) GetAllTxs() []*WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wTxs := make([]*WrappedTx, len(txs.hashTxs)) - i := 0 - for _, wtx := range txs.hashTxs { - wTxs[i] = wtx - i++ - } - - return wTxs -} - -// GetTxBySender returns a *WrappedTx by the transaction's sender property -// defined by the ABCI application. -func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.senderTxs[sender] -} - -// GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.hashTxs[hash] -} - -// IsTxRemoved returns true if a transaction by hash is marked as removed and -// false otherwise. -func (txs *TxStore) IsTxRemoved(hash [mempool.TxKeySize]byte) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx, ok := txs.hashTxs[hash] - if ok { - return wtx.removed - } - - return false -} - -// SetTx stores a *WrappedTx by its hash. If the transaction also contains a - non-empty sender, we additionally store the transaction by the sender as - defined by the ABCI application. -func (txs *TxStore) SetTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - txs.senderTxs[wtx.sender] = wtx - } - - txs.hashTxs[mempool.TxKey(wtx.tx)] = wtx -} - -// RemoveTx removes a *WrappedTx from the transaction store. It deletes all -// indexes of the transaction. -func (txs *TxStore) RemoveTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - delete(txs.senderTxs, wtx.sender) - } - - delete(txs.hashTxs, mempool.TxKey(wtx.tx)) - wtx.removed = true -} - -// TxHasPeer returns true if a transaction by hash has a given peer ID and false -// otherwise. If the transaction does not exist, false is returned.
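TxHasPeer and the GetOrSetPeerByTxHash helper that follows combine lookup and mutation under a single lock, so "have we already seen this tx from this peer?" is answered atomically rather than via a racy check-then-act. A tiny sketch of that get-or-set shape:

```go
package main

import (
	"fmt"
	"sync"
)

type peerSet struct {
	mtx   sync.Mutex
	peers map[string]map[uint16]struct{} // tx hash -> peers that sent it
}

// getOrSet records peerID for hash and reports whether it was already there.
// Doing both under one lock avoids a race between two peers delivering the
// same tx concurrently.
func (p *peerSet) getOrSet(hash string, peerID uint16) (seen bool) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	set, ok := p.peers[hash]
	if !ok {
		set = make(map[uint16]struct{})
		p.peers[hash] = set
	}
	if _, ok := set[peerID]; ok {
		return true
	}
	set[peerID] = struct{}{}
	return false
}

func main() {
	p := &peerSet{peers: make(map[string]map[uint16]struct{})}
	fmt.Println(p.getOrSet("abc", 1)) // false: first time
	fmt.Println(p.getOrSet("abc", 1)) // true: duplicate from same peer
	fmt.Println(p.getOrSet("abc", 2)) // false: new peer
}
```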
-func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return false - } - - _, ok := wtx.peers[peerID] - return ok -} - -// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the -// given peerID to the WrappedTx's set of peers that sent us this transaction. -// We return true if we've already recorded the given peer for this transaction -// and false otherwise. If the transaction does not exist by hash, we return -// (nil, false). -func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID uint16) (*WrappedTx, bool) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return nil, false - } - - if wtx.peers == nil { - wtx.peers = make(map[uint16]struct{}) - } - - if _, ok := wtx.peers[peerID]; ok { - return wtx, true - } - - wtx.peers[peerID] = struct{}{} - return wtx, false -} - -// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be -// used to build generic transaction indexes in the mempool. It accepts a -// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx -// references which is used during Insert in order to determine sorted order. If -// less returns true, a <= b. -type WrappedTxList struct { - mtx tmsync.RWMutex - txs []*WrappedTx - less func(*WrappedTx, *WrappedTx) bool -} - -func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { - return &WrappedTxList{ - txs: make([]*WrappedTx, 0), - less: less, - } -} - -// Size returns the number of WrappedTx objects in the list. -func (wtl *WrappedTxList) Size() int { - wtl.mtx.RLock() - defer wtl.mtx.RUnlock() - - return len(wtl.txs) -} - -// Reset resets the list of transactions to an empty list. -func (wtl *WrappedTxList) Reset() { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - wtl.txs = make([]*WrappedTx, 0) -} - -// Insert inserts a WrappedTx reference into the sorted list based on the list's -// comparator function. -func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - if i == len(wtl.txs) { - // insert at the end - wtl.txs = append(wtl.txs, wtx) - return - } - - // Make space for the inserted element by shifting values at the insertion - // index up one index. - // - // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). - wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) - wtl.txs[i] = wtx -} - -// Remove attempts to remove a WrappedTx from the sorted list. -func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - // Since the list is sorted, we evaluate all elements starting at i. Note, if - // the element does not exist, we may potentially evaluate the entire remainder - // of the list. However, a caller should not be expected to call Remove with a - // non-existing element. - for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) 
- return - } - - i++ - } -} diff --git a/internal/p2p/address_test.go b/internal/p2p/address_test.go index 2745faf73..d5f9e498e 100644 --- a/internal/p2p/address_test.go +++ b/internal/p2p/address_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "net" "strings" "testing" @@ -204,6 +205,9 @@ func TestParseNodeAddress(t *testing.T) { func TestNodeAddress_Resolve(t *testing.T) { id := types.NodeID("00112233445566778899aabbccddeeff00112233") + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + testcases := []struct { address p2p.NodeAddress expect p2p.Endpoint @@ -275,6 +279,9 @@ func TestNodeAddress_Resolve(t *testing.T) { for _, tc := range testcases { tc := tc t.Run(tc.address.String(), func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + endpoints, err := tc.address.Resolve(ctx) if !tc.ok { require.Error(t, err) diff --git a/internal/p2p/base_reactor.go b/internal/p2p/base_reactor.go deleted file mode 100644 index 09925caf8..000000000 --- a/internal/p2p/base_reactor.go +++ /dev/null @@ -1,74 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/service" -) - -// Reactor is responsible for handling incoming messages on one or more -// Channel. Switch calls GetChannels when reactor is added to it. When a new -// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called -// when the peer is stopped. Receive is called when a message is received on a -// channel associated with this reactor. -// -// Peer#Send or Peer#TrySend should be used to send the message to a peer. -type Reactor interface { - service.Service // Start, Stop - - // SetSwitch allows setting a switch. - SetSwitch(*Switch) - - // GetChannels returns the list of MConnection.ChannelDescriptor. Make sure - // that each ID is unique across all the reactors added to the switch. - GetChannels() []*conn.ChannelDescriptor - - // InitPeer is called by the switch before the peer is started. Use it to - // initialize data for the peer (e.g. peer state). - // - // NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start - // the peer. Do not store any data associated with the peer in the reactor - // itself unless you don't want to have a state, which is never cleaned up. - InitPeer(peer Peer) Peer - - // AddPeer is called by the switch after the peer is added and successfully - // started. Use it to start goroutines communicating with the peer. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. - // - // XXX: do not call any methods that can block or incur heavy processing. 
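WrappedTxList keeps its slice ordered by binary-searching for the insertion point with sort.Search and shifting the tail with a single append. The same idiom on a plain ascending []int:

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted places v into xs, which is kept in ascending order.
func insertSorted(xs []int, v int) []int {
	// sort.Search returns the smallest index for which the predicate
	// holds, i.e. the first element >= v.
	i := sort.Search(len(xs), func(i int) bool { return xs[i] >= v })
	if i == len(xs) {
		return append(xs, v) // belongs at the end
	}
	xs = append(xs[:i+1], xs[i:]...) // shift the tail up by one slot
	xs[i] = v
	return xs
}

func main() {
	xs := []int{1, 3, 5}
	xs = insertSorted(xs, 4)
	fmt.Println(xs) // [1 3 4 5]
}
```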
- // https://github.com/tendermint/tendermint/issues/2888 - Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - service.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *service.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/internal/p2p/conn/conn_go110.go b/internal/p2p/conn/conn_go110.go index 682188101..459c3169b 100644 --- a/internal/p2p/conn/conn_go110.go +++ b/internal/p2p/conn/conn_go110.go @@ -1,3 +1,4 @@ +//go:build go1.10 // +build go1.10 package conn diff --git a/internal/p2p/conn/conn_notgo110.go b/internal/p2p/conn/conn_notgo110.go index ed642eb54..21dffad2c 100644 --- a/internal/p2p/conn/conn_notgo110.go +++ b/internal/p2p/conn/conn_notgo110.go @@ -1,3 +1,4 @@ +//go:build !go1.10 // +build !go1.10 package conn diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index dc5bacc39..9fb330286 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -2,6 +2,7 @@ package conn import ( "bufio" + "context" "errors" "fmt" "io" @@ -14,7 +15,7 @@ import ( "github.com/gogo/protobuf/proto" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" @@ -48,7 +49,7 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) +type receiveCbFunc func(chID ChannelID, msgBytes []byte) type errorCbFunc func(interface{}) /* @@ -64,15 +65,11 @@ initialization of the connection. There are two methods for sending messages: func (m MConnection) Send(chID byte, msgBytes []byte) bool {} - func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} `Send(chID, msgBytes)` is a blocking call that waits until `msg` is successfully queued for the channel with the given id byte `chID`, or until the request times out. The message `msg` is serialized using Protobuf. -`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the -channel's queue is full. - Inbound message bytes are handled with an onReceive callback function. 
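Send, and the channel-level sendBytes further down, implement a bounded queue whose producer gives up after a deadline instead of blocking forever. In outline (defaultSendTimeout here is a stand-in for the package constant):

```go
package main

import (
	"fmt"
	"time"
)

const defaultSendTimeout = 10 * time.Second // assumption: stands in for the real constant

// sendBytes enqueues msg on queue, failing after defaultSendTimeout if the
// queue stays full. Returning false lets the caller treat it as backpressure.
func sendBytes(queue chan []byte, msg []byte) bool {
	select {
	case queue <- msg:
		return true
	case <-time.After(defaultSendTimeout):
		return false
	}
}

func main() {
	q := make(chan []byte, 1)
	fmt.Println(sendBytes(q, []byte("a"))) // true: capacity available
	go func() { <-q }()                    // a reader frees up the slot
	fmt.Println(sendBytes(q, []byte("b"))) // true once the reader drains
}
```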
*/ type MConnection struct { @@ -81,12 +78,12 @@ type MConnection struct { conn net.Conn bufConnReader *bufio.Reader bufConnWriter *bufio.Writer - sendMonitor *flow.Monitor - recvMonitor *flow.Monitor + sendMonitor *flowrate.Monitor + recvMonitor *flowrate.Monitor send chan struct{} pong chan struct{} - channels []*Channel - channelsIdx map[byte]*Channel + channels []*channel + channelsIdx map[ChannelID]*channel onReceive receiveCbFunc onError errorCbFunc errored uint32 @@ -150,12 +147,14 @@ func DefaultMConnConfig() MConnConfig { // NewMConnection wraps net.Conn and creates multiplex connection func NewMConnection( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, ) *MConnection { return NewMConnectionWithConfig( + logger, conn, chDescs, onReceive, @@ -165,6 +164,7 @@ func NewMConnection( // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config func NewMConnectionWithConfig( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, @@ -179,8 +179,8 @@ func NewMConnectionWithConfig( conn: conn, bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), + sendMonitor: flowrate.New(0, 0), + recvMonitor: flowrate.New(0, 0), send: make(chan struct{}, 1), pong: make(chan struct{}, 1), onReceive: onReceive, @@ -189,9 +189,11 @@ func NewMConnectionWithConfig( created: time.Now(), } + mconn.BaseService = *service.NewBaseService(logger, "MConnection", mconn) + // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} + var channelsIdx = map[ChannelID]*channel{} + var channels = []*channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -201,24 +203,15 @@ func NewMConnectionWithConfig( mconn.channels = channels mconn.channelsIdx = channelsIdx - mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn) - // maxPacketMsgSize() is a bit heavy, so call just once mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() return mconn } -func (c *MConnection) SetLogger(l log.Logger) { - c.BaseService.SetLogger(l) - for _, ch := range c.channels { - ch.SetLogger(l) - } -} - // OnStart implements BaseService -func (c *MConnection) OnStart() error { - if err := c.BaseService.OnStart(); err != nil { +func (c *MConnection) OnStart(ctx context.Context) error { + if err := c.BaseService.OnStart(ctx); err != nil { return err } c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle) @@ -265,43 +258,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { return false } -// FlushStop replicates the logic of OnStop. -// It additionally ensures that all successful -// .Send() calls will get flushed before closing -// the connection. -func (c *MConnection) FlushStop() { - if c.stopServices() { - return - } - - // this block is unique to FlushStop - { - // wait until the sendRoutine exits - // so we dont race on calling sendSomePacketMsgs - <-c.doneSendRoutine - - // Send and flush all pending msgs. - // Since sendRoutine has exited, we can call this - // safely - eof := c.sendSomePacketMsgs() - for !eof { - eof = c.sendSomePacketMsgs() - } - c.flush() - - // Now we can close the connection - } - - c.conn.Close() - - // We can't close pong safely here because - // recvRoutine may write to it after we've stopped. 
- // Though it doesn't need to get closed at all, - // we close it @ recvRoutine. - - // c.Stop() -} - // OnStop implements BaseService func (c *MConnection) OnStop() { if c.stopServices() { @@ -348,7 +304,7 @@ func (c *MConnection) stopForError(r interface{}) { } // Queues a message to be sent to channel. -func (c *MConnection) Send(chID byte, msgBytes []byte) bool { +func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { if !c.IsRunning() { return false } @@ -375,49 +331,6 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool { return success } -// Queues a message to be sent to channel. -// Nonblocking, returns true if successful. -func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { - if !c.IsRunning() { - return false - } - - c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", msgBytes) - - // Send message to channel. - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) - return false - } - - ok = channel.trySendBytes(msgBytes) - if ok { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } - - return ok -} - -// CanSend returns true if you can send more data onto the chID, false -// otherwise. Use only as a heuristic. -func (c *MConnection) CanSend(chID byte) bool { - if !c.IsRunning() { - return false - } - - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID)) - return false - } - return channel.canSend() -} - // sendRoutine polls for packets to send from channels. func (c *MConnection) sendRoutine() { defer c._recover() @@ -520,7 +433,7 @@ func (c *MConnection) sendPacketMsg() bool { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 - var leastChannel *Channel + var leastChannel *channel for _, channel := range c.channels { // If nothing to send, skip this channel if !channel.isSendPending() { @@ -624,7 +537,7 @@ FOR_LOOP: // never block } case *tmp2p.Packet_PacketMsg: - channelID := byte(pkt.PacketMsg.ChannelID) + channelID := ChannelID(pkt.PacketMsg.ChannelID) channel, ok := c.channelsIdx[channelID] if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil { err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) @@ -682,13 +595,6 @@ func (c *MConnection) maxPacketMsgSize() int { return len(bz) } -type ConnectionStatus struct { - Duration time.Duration - SendMonitor flow.Status - RecvMonitor flow.Status - Channels []ChannelStatus -} - type ChannelStatus struct { ID byte SendQueueCapacity int @@ -697,30 +603,16 @@ type ChannelStatus struct { RecentlySent int64 } -func (c *MConnection) Status() ConnectionStatus { - var status ConnectionStatus - status.Duration = time.Since(c.created) - status.SendMonitor = c.sendMonitor.Status() - status.RecvMonitor = c.recvMonitor.Status() - status.Channels = make([]ChannelStatus, len(c.channels)) - for i, channel := range c.channels { - status.Channels[i] = ChannelStatus{ - ID: channel.desc.ID, - SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)), - Priority: channel.desc.Priority, - RecentlySent: atomic.LoadInt64(&channel.recentlySent), - } - } - return status -} - //----------------------------------------------------------------------------- +// ChannelID is an arbitrary channel ID. 
+type ChannelID uint16 type ChannelDescriptor struct { - ID byte + ID ChannelID Priority int + MessageType proto.Message + // TODO: Remove once p2p refactor is complete. SendQueueCapacity int RecvMessageCapacity int @@ -728,10 +620,6 @@ type ChannelDescriptor struct { // RecvBufferCapacity defines the max buffer size of inbound messages for a // given p2p Channel queue. RecvBufferCapacity int - - // MaxSendBytes defines the maximum number of bytes that can be sent at any - // given moment from a Channel to a peer. - MaxSendBytes uint } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { @@ -748,44 +636,45 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { return } -// TODO: lowercase. // NOTE: not goroutine-safe. -type Channel struct { +type channel struct { + // Exponential moving average. + // This field must be accessed atomically. + // It is first in the struct to ensure correct alignment. + // See https://github.com/tendermint/tendermint/issues/7000. + recentlySent int64 + conn *MConnection desc ChannelDescriptor sendQueue chan []byte sendQueueSize int32 // atomic. recving []byte sending []byte - recentlySent int64 // exponential moving average maxPacketMsgPayloadSize int Logger log.Logger } -func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { +func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { desc = desc.FillDefaults() if desc.Priority <= 0 { panic("Channel default priority must be a positive integer") } - return &Channel{ + return &channel{ conn: conn, desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, + Logger: conn.Logger, } } -func (ch *Channel) SetLogger(l log.Logger) { - ch.Logger = l -} - // Queues message to send to this channel. // Goroutine-safe // Times out (and returns false) after defaultSendTimeout -func (ch *Channel) sendBytes(bytes []byte) bool { +func (ch *channel) sendBytes(bytes []byte) bool { select { case ch.sendQueue <- bytes: atomic.AddInt32(&ch.sendQueueSize, 1) @@ -795,34 +684,10 @@ func (ch *Channel) sendBytes(bytes []byte) bool { } } -// Queues message to send to this channel. -// Nonblocking, returns true if successful. -// Goroutine-safe -func (ch *Channel) trySendBytes(bytes []byte) bool { - select { - case ch.sendQueue <- bytes: - atomic.AddInt32(&ch.sendQueueSize, 1) - return true - default: - return false - } -} - -// Goroutine-safe -func (ch *Channel) loadSendQueueSize() (size int) { - return int(atomic.LoadInt32(&ch.sendQueueSize)) -} - -// Goroutine-safe -// Use only as a heuristic. -func (ch *Channel) canSend() bool { - return ch.loadSendQueueSize() < defaultSendQueueCapacity -} - // Returns true if any PacketMsgs are pending to be sent. // Call before calling nextPacketMsg() // Goroutine-safe -func (ch *Channel) isSendPending() bool { +func (ch *channel) isSendPending() bool { if len(ch.sending) == 0 { if len(ch.sendQueue) == 0 { return false @@ -834,7 +699,7 @@ func (ch *Channel) isSendPending() bool { // Creates a new PacketMsg to send. 
// Not goroutine-safe
-func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
+func (ch *channel) nextPacketMsg() tmp2p.PacketMsg {
 	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
 	maxSize := ch.maxPacketMsgPayloadSize
 	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
@@ -851,7 +716,7 @@

 // Writes next PacketMsg to w and updates c.recentlySent.
 // Not goroutine-safe
-func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
+func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) {
 	packet := ch.nextPacketMsg()
 	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
 	atomic.AddInt64(&ch.recentlySent, int64(n))
@@ -861,7 +726,7 @@

 // Handles incoming PacketMsgs. It returns a message bytes if message is
 // complete, which is owned by the caller and will not be modified.
 // Not goroutine-safe
-func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
+func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
 	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
 	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
 	if recvCap < recvReceived {
@@ -878,7 +743,7 @@

 // Call this periodically to update stats for throttling purposes.
 // Not goroutine-safe
-func (ch *Channel) updateStats() {
+func (ch *channel) updateStats() {
 	// Exponential decay of stats.
 	// TODO: optimize.
 	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go
index 6d009f85c..dc198d8bd 100644
--- a/internal/p2p/conn/connection_test.go
+++ b/internal/p2p/conn/connection_test.go
@@ -1,6 +1,7 @@
 package conn

 import (
+	"context"
 	"encoding/hex"
 	"net"
 	"testing"
@@ -19,27 +20,27 @@ import (

 const maxPingPongPacketSize = 1024 // bytes

-func createTestMConnection(conn net.Conn) *MConnection {
-	onReceive := func(chID byte, msgBytes []byte) {
-	}
-	onError := func(r interface{}) {
-	}
-	c := createMConnectionWithCallbacks(conn, onReceive, onError)
-	c.SetLogger(log.TestingLogger())
-	return c
+func createTestMConnection(logger log.Logger, conn net.Conn) *MConnection {
+	return createMConnectionWithCallbacks(logger, conn,
+		// onReceive
+		func(chID ChannelID, msgBytes []byte) {
+		},
+		// onError
+		func(r interface{}) {
+		})
 }

 func createMConnectionWithCallbacks(
+	logger log.Logger,
 	conn net.Conn,
-	onReceive func(chID byte, msgBytes []byte),
+	onReceive func(chID ChannelID, msgBytes []byte),
 	onError func(r interface{}),
 ) *MConnection {
 	cfg := DefaultMConnConfig()
 	cfg.PingInterval = 90 * time.Millisecond
 	cfg.PongTimeout = 45 * time.Millisecond
 	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
-	c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
-	c.SetLogger(log.TestingLogger())
+	c := NewMConnectionWithConfig(logger, conn, chDescs, onReceive, onError, cfg)
 	return c
 }

@@ -47,8 +48,11 @@ func TestMConnectionSendFlushStop(t *testing.T) {
 	server, client := NetPipe()
 	t.Cleanup(closeAll(t, client, server))

-	clientConn := createTestMConnection(client)
-	err := clientConn.Start()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	clientConn := createTestMConnection(log.TestingLogger(), client)
+	err := 
clientConn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, clientConn)) @@ -69,9 +73,6 @@ func TestMConnectionSendFlushStop(t *testing.T) { errCh <- err }() - // stop the conn - it should flush all conns - clientConn.FlushStop() - timer := time.NewTimer(3 * time.Second) select { case <-errCh: @@ -84,8 +85,11 @@ func TestMConnectionSend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) - mconn := createTestMConnection(client) - err := mconn.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createTestMConnection(log.TestingLogger(), client) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -97,16 +101,14 @@ func TestMConnectionSend(t *testing.T) { if err != nil { t.Error(err) } - assert.True(t, mconn.CanSend(0x01)) msg = []byte("Spider-Man") - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) if err != nil { t.Error(err) } - assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown") } @@ -116,19 +118,24 @@ func TestMConnectionReceive(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh <- r } - mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn1.Start() + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn1 := createMConnectionWithCallbacks(logger, client, onReceive, onError) + err := mconn1.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn1)) - mconn2 := createTestMConnection(server) - err = mconn2.Start() + mconn2 := createTestMConnection(logger, server) + err = mconn2.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn2)) @@ -145,34 +152,24 @@ func TestMConnectionReceive(t *testing.T) { } } -func TestMConnectionStatus(t *testing.T) { - server, client := NetPipe() - t.Cleanup(closeAll(t, client, server)) - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) - - status := mconn.Status() - assert.NotNil(t, status) - assert.Zero(t, status.Channels[0].SendQueueSize) -} - func TestMConnectionPongTimeoutResultsInError(t *testing.T) { server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -203,14 +200,17 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh 
<- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -257,14 +257,17 @@ func TestMConnectionMultiplePings(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -304,14 +307,18 @@ func TestMConnectionPingPongs(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -361,14 +368,17 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -387,10 +397,14 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { } } -func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { +func newClientAndServerConnsForReadErrors( + ctx context.Context, + t *testing.T, + chOnErr chan struct{}, +) (*MConnection, *MConnection) { server, client := NetPipe() - onReceive := func(chID byte, msgBytes []byte) {} + onReceive := func(chID ChannelID, msgBytes []byte) {} onError := func(r interface{}) {} // create client conn with two channels @@ -398,20 +412,21 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) ( {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, } - mconnClient := NewMConnection(client, chDescs, onReceive, onError) - mconnClient.SetLogger(log.TestingLogger().With("module", "client")) - err := mconnClient.Start() + logger := log.TestingLogger() + + mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError) + err := mconnClient.Start(ctx) require.Nil(t, err) // create server conn with 1 channel // it fires on chOnErr when there's an error - serverLogger := log.TestingLogger().With("module", "server") 
+ serverLogger := logger.With("module", "server") onError = func(r interface{}) { chOnErr <- struct{}{} } - mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) - mconnServer.SetLogger(serverLogger) - err = mconnServer.Start() + + mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) + err = mconnServer.Start(ctx) require.Nil(t, err) return mconnClient, mconnServer } @@ -427,8 +442,11 @@ func expectSend(ch chan struct{}) bool { } func TestMConnectionReadErrorBadEncoding(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) client := mconnClient.conn @@ -440,8 +458,11 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) msg := []byte("Ant-Man") @@ -459,10 +480,13 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { chOnRcv <- struct{}{} } @@ -493,8 +517,11 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { } func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) // send msg with unknown msg type @@ -506,26 +533,25 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionTrySend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconn := createTestMConnection(client) - err := mconn.Start() + mconn := createTestMConnection(log.TestingLogger(), client) + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) require.NoError(t, err) - assert.True(t, mconn.CanSend(0x01)) - assert.True(t, mconn.TrySend(0x01, msg)) - assert.False(t, mconn.CanSend(0x01)) + assert.True(t, mconn.Send(0x01, msg)) go func() { - mconn.TrySend(0x01, msg) + mconn.Send(0x01, msg) resultCh <- "TrySend" }() - assert.False(t, mconn.CanSend(0x01)) - assert.False(t, mconn.TrySend(0x01, msg)) + assert.False(t, mconn.Send(0x01, msg)) assert.Equal(t, "TrySend", <-resultCh) } @@ -557,10 +583,13 @@ func TestMConnectionChannelOverflow(t *testing.T) { chOnErr := make(chan 
struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { chOnRcv <- struct{}{} } diff --git a/internal/p2p/conn/evil_secret_connection_test.go b/internal/p2p/conn/evil_secret_connection_test.go index 6d8b7cbf7..05e88cd85 100644 --- a/internal/p2p/conn/evil_secret_connection_test.go +++ b/internal/p2p/conn/evil_secret_connection_test.go @@ -13,7 +13,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -113,7 +113,7 @@ func (c *evilConn) Read(data []byte) (n int, err error) { case 1: signature := c.signChallenge() if !c.badAuthSignature { - pkpb, err := cryptoenc.PubKeyToProto(c.privKey.PubKey()) + pkpb, err := encoding.PubKeyToProto(c.privKey.PubKey()) if err != nil { panic(err) } diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index 2f0d269d6..35fac488a 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -23,7 +23,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" @@ -406,7 +406,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. 
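
The shareAuthSignature exchange below has to send and receive at the same time, since each side of the secret-connection handshake writes first from its own point of view; async.Parallel runs the two closures concurrently and aborts if either fails. A stdlib-only approximation of that shape follows; it is illustrative, not the libs/async API.

package main

import "fmt"

// parallel2 runs f and g concurrently and waits for both, returning
// both errors. This approximates what async.Parallel does for the two
// closures in shareAuthSignature.
func parallel2(f, g func() error) (errF, errG error) {
	done := make(chan error, 1)
	go func() { done <- f() }()
	errG = g()
	errF = <-done
	return errF, errG
}

func main() {
	sendErr, recvErr := parallel2(
		func() error { fmt.Println("send our pubkey+signature"); return nil },
		func() error { fmt.Println("receive peer pubkey+signature"); return nil },
	)
	fmt.Println(sendErr, recvErr)
}
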
var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - pbpk, err := cryptoenc.PubKeyToProto(pubKey) + pbpk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, true, err } @@ -423,7 +423,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + pk, err := encoding.PubKeyFromProto(pba.PubKey) if err != nil { return nil, true, err // abort } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 62587c0da..08a7925fa 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" mrand "math/rand" "os" @@ -195,7 +194,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read, write string = "", writes[0] + var read, write = "", writes[0] var readCount = 0 for _, readChunk := range reads { read += readChunk @@ -229,7 +228,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) + require.NoError(t, os.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { diff --git a/internal/p2p/conn_set.go b/internal/p2p/conn_set.go deleted file mode 100644 index 987d9f968..000000000 --- a/internal/p2p/conn_set.go +++ /dev/null @@ -1,82 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// ConnSet is a lookup table for connections and all their ips. -type ConnSet interface { - Has(net.Conn) bool - HasIP(net.IP) bool - Set(net.Conn, []net.IP) - Remove(net.Conn) - RemoveAddr(net.Addr) -} - -type connSetItem struct { - conn net.Conn - ips []net.IP -} - -type connSet struct { - tmsync.RWMutex - - conns map[string]connSetItem -} - -// NewConnSet returns a ConnSet implementation. -func NewConnSet() ConnSet { - return &connSet{ - conns: map[string]connSetItem{}, - } -} - -func (cs *connSet) Has(c net.Conn) bool { - cs.RLock() - defer cs.RUnlock() - - _, ok := cs.conns[c.RemoteAddr().String()] - - return ok -} - -func (cs *connSet) HasIP(ip net.IP) bool { - cs.RLock() - defer cs.RUnlock() - - for _, c := range cs.conns { - for _, known := range c.ips { - if known.Equal(ip) { - return true - } - } - } - - return false -} - -func (cs *connSet) Remove(c net.Conn) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, c.RemoteAddr().String()) -} - -func (cs *connSet) RemoveAddr(addr net.Addr) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, addr.String()) -} - -func (cs *connSet) Set(c net.Conn, ips []net.IP) { - cs.Lock() - defer cs.Unlock() - - cs.conns[c.RemoteAddr().String()] = connSetItem{ - conn: c, - ips: ips, - } -} diff --git a/internal/p2p/errors.go b/internal/p2p/errors.go index 648f2cb3a..d4df28792 100644 --- a/internal/p2p/errors.go +++ b/internal/p2p/errors.go @@ -17,7 +17,7 @@ func (e ErrFilterTimeout) Error() string { // ErrRejected indicates that a Peer was rejected carrying additional // information as to the reason. 
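
ErrRejected, defined next, is a structured error: it bundles the rejecting address, connection, and underlying cause, and exposes predicates (IsSelf, IsDuplicate, and so on) so callers can branch on the rejection reason instead of string-matching error text. The pattern in miniature, with a hypothetical field set:

package main

import (
	"errors"
	"fmt"
)

// errRejected is a toy version of ErrRejected: a structured error that
// records why a peer was turned away.
type errRejected struct {
	isSelf      bool
	isDuplicate bool
	err         error
}

func (e errRejected) Error() string {
	switch {
	case e.isSelf:
		return "rejected: connected to self"
	case e.isDuplicate:
		return fmt.Sprintf("rejected: duplicate peer (%v)", e.err)
	}
	return fmt.Sprintf("rejected: %v", e.err)
}

// IsSelf lets callers branch on the reason without parsing the message.
func (e errRejected) IsSelf() bool { return e.isSelf }

func main() {
	err := error(errRejected{isSelf: true})
	var rej errRejected
	if errors.As(err, &rej) && rej.IsSelf() {
		fmt.Println("don't dial ourselves:", err)
	}
}
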
type ErrRejected struct { - addr NetAddress + addr NodeAddress conn net.Conn err error id types.NodeID @@ -30,7 +30,7 @@ type ErrRejected struct { } // Addr returns the NetAddress for the rejected Peer. -func (e ErrRejected) Addr() NetAddress { +func (e ErrRejected) Addr() NodeAddress { return e.addr } @@ -120,15 +120,15 @@ func (e ErrSwitchDuplicatePeerIP) Error() string { // ErrSwitchConnectToSelf to be raised when trying to connect to itself. type ErrSwitchConnectToSelf struct { - Addr *NetAddress + Addr *NodeAddress } func (e ErrSwitchConnectToSelf) Error() string { - return fmt.Sprintf("connect to self: %v", e.Addr) + return fmt.Sprintf("connect to self: %s", e.Addr) } type ErrSwitchAuthenticationFailure struct { - Dialed *NetAddress + Dialed *NodeAddress Got types.NodeID } diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index e3481058b..3677180de 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -1,6 +1,11 @@ package p2p import ( + "fmt" + "reflect" + "regexp" + "sync" + "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" "github.com/go-kit/kit/metrics/prometheus" @@ -13,6 +18,13 @@ const ( MetricsSubsystem = "p2p" ) +var ( + // valueToLabelRegexp is used to find the golang package name and type name + // so that the name can be turned into a prometheus label where the characters + // in the label do not include prometheus special characters such as '*' and '.'. + valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) +) + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. @@ -43,6 +55,9 @@ type Metrics struct { // PeerQueueMsgSize defines the average size of messages sent over a peer's // queue for a specific flow (i.e. Channel). PeerQueueMsgSize metrics.Gauge + + mtx *sync.RWMutex + messageLabelNames map[reflect.Type]string } // PrometheusMetrics returns Metrics build using Prometheus client library. @@ -68,14 +83,14 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "peer_receive_bytes_total", Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_send_bytes_total", Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -118,6 +133,9 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "router_channel_queue_msg_size", Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", }, append(labels, "ch_id")).With(labelsAndValues...), + + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } @@ -133,5 +151,30 @@ func NopMetrics() *Metrics { RouterChannelQueueSend: discard.NewHistogram(), PeerQueueDroppedMsgs: discard.NewCounter(), PeerQueueMsgSize: discard.NewGauge(), + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } + +// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang +// type that is passed in. 
+// This method uses a map on the Metrics struct so that each label name only needs +// to be produced once to prevent expensive string operations. +func (m *Metrics) ValueToMetricLabel(i interface{}) string { + t := reflect.TypeOf(i) + m.mtx.RLock() + + if s, ok := m.messageLabelNames[t]; ok { + m.mtx.RUnlock() + return s + } + m.mtx.RUnlock() + + s := t.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + l := fmt.Sprintf("%s_%s", ss[1], ss[2]) + m.mtx.Lock() + defer m.mtx.Unlock() + m.messageLabelNames[t] = l + return l +} diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go new file mode 100644 index 000000000..53b3c47bd --- /dev/null +++ b/internal/p2p/metrics_test.go @@ -0,0 +1,19 @@ +package p2p + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +func TestValueToMetricsLabel(t *testing.T) { + m := NopMetrics() + r := &p2p.PexResponse{} + str := m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) + + // subsequent calls to the function should produce the same result + str = m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) +} diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go deleted file mode 100644 index cede51768..000000000 --- a/internal/p2p/mock/peer.go +++ /dev/null @@ -1,70 +0,0 @@ -package mock - -import ( - "net" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -type Peer struct { - *service.BaseService - ip net.IP - id types.NodeID - addr *p2p.NetAddress - kv map[string]interface{} - Outbound, Persistent bool -} - -// NewPeer creates and starts a new mock peer. If the ip -// is nil, random routable address is used. 
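
ValueToMetricLabel above turns a message value such as *p2p.PexResponse into the Prometheus-safe label p2p_PexResponse: reflect.TypeOf(i).String() yields "*p2p.PexResponse", valueToLabelRegexp strips the pointer star and captures the package and type name, and the result is memoized per reflect.Type under the RWMutex so the reflection and regexp run once per message type. The unlock/relock window between the read and write paths means two goroutines may both compute the label, but they store the identical value, so the race is benign. A hypothetical call site for the new message_type label (the real ones live in the router's send/receive paths):

package p2p

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/tendermint/tendermint/types"
)

// recordSend is an illustrative helper only; it shows how the new
// message_type label on peer_send_bytes_total would be populated.
func recordSend(m *Metrics, peerID types.NodeID, chID ChannelID, msg proto.Message, n int) {
	m.PeerSendBytesTotal.With(
		"peer_id", string(peerID),
		"chID", fmt.Sprintf("%#x", uint16(chID)),
		"message_type", m.ValueToMetricLabel(msg),
	).Add(float64(n))
}
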
-func NewPeer(ip net.IP) *Peer { - var netAddr *p2p.NetAddress - if ip == nil { - _, netAddr = p2p.CreateRoutableAddr() - } else { - netAddr = types.NewNetAddressIPPort(ip, 26656) - } - nodeKey := types.GenNodeKey() - netAddr.ID = nodeKey.ID - mp := &Peer{ - ip: ip, - id: nodeKey.ID, - addr: netAddr, - kv: make(map[string]interface{}), - } - mp.BaseService = service.NewBaseService(nil, "MockPeer", mp) - if err := mp.Start(); err != nil { - panic(err) - } - return mp -} - -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: mp.addr.ID, - ListenAddr: mp.addr.DialString(), - } -} -func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp *Peer) ID() types.NodeID { return mp.id } -func (mp *Peer) IsOutbound() bool { return mp.Outbound } -func (mp *Peer) IsPersistent() bool { return mp.Persistent } -func (mp *Peer) Get(key string) interface{} { - if value, ok := mp.kv[key]; ok { - return value - } - return nil -} -func (mp *Peer) Set(key string, value interface{}) { - mp.kv[key] = value -} -func (mp *Peer) RemoteIP() net.IP { return mp.ip } -func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } -func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *Peer) CloseConn() error { return nil } diff --git a/internal/p2p/mock/reactor.go b/internal/p2p/mock/reactor.go deleted file mode 100644 index d634a8032..000000000 --- a/internal/p2p/mock/reactor.go +++ /dev/null @@ -1,23 +0,0 @@ -package mock - -import ( - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" -) - -type Reactor struct { - p2p.BaseReactor -} - -func NewReactor() *Reactor { - r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) - r.SetLogger(log.TestingLogger()) - return r -} - -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 6c6174117..65b9afafb 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -35,20 +35,6 @@ func (_m *Connection) Close() error { return r0 } -// FlushClose provides a mock function with given fields: -func (_m *Connection) FlushClose() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Handshake provides a mock function with given fields: _a0, _a1, _a2 func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { ret := _m.Called(_a0, _a1, _a2) @@ -94,14 +80,14 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint { } // ReceiveMessage provides a mock function with given fields: -func (_m *Connection) ReceiveMessage() (p2p.ChannelID, []byte, error) { +func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { ret := _m.Called() - var r0 p2p.ChannelID - if rf, ok := ret.Get(0).(func() p2p.ChannelID); ok { + var r0 conn.ChannelID + if 
rf, ok := ret.Get(0).(func() conn.ChannelID); ok { r0 = rf() } else { - r0 = ret.Get(0).(p2p.ChannelID) + r0 = ret.Get(0).(conn.ChannelID) } var r1 []byte @@ -138,35 +124,14 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { } // SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { +func (_m *Connection) SendMessage(_a0 conn.ChannelID, _a1 []byte) error { ret := _m.Called(_a0, _a1) - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { + var r0 error + if rf, ok := ret.Get(0).(func(conn.ChannelID, []byte) error); ok { r0 = rf(_a0, _a1) } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Status provides a mock function with given fields: -func (_m *Connection) Status() conn.ConnectionStatus { - ret := _m.Called() - - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(conn.ConnectionStatus) + r0 = ret.Error(0) } return r0 @@ -185,24 +150,3 @@ func (_m *Connection) String() string { return r0 } - -// TrySendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go deleted file mode 100644 index b905c1156..000000000 --- a/internal/p2p/mocks/peer.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
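
The files in this directory are mockery-generated testify mocks: each method forwards its arguments to _m.Called and replays whatever the test arranged with On(...).Return(...). With the Connection mock's SendMessage reduced to a single error return above, a test stubs it roughly like this (a sketch; channel and payload values are arbitrary):

package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/p2p/conn"
	"github.com/tendermint/tendermint/internal/p2p/mocks"
)

func TestConnectionSendStub(t *testing.T) {
	m := &mocks.Connection{}
	// Arrange: SendMessage on channel 0x01 with this payload succeeds;
	// the generated method will return the nil error programmed here.
	m.On("SendMessage", conn.ChannelID(0x01), []byte("msg")).Return(nil)

	// Act through the mock as the router would through a real Connection.
	require.NoError(t, m.SendMessage(conn.ChannelID(0x01), []byte("msg")))
	m.AssertExpectations(t)
}
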
- -package mocks - -import ( - conn "github.com/tendermint/tendermint/internal/p2p/conn" - log "github.com/tendermint/tendermint/libs/log" - - mock "github.com/stretchr/testify/mock" - - net "net" - - types "github.com/tendermint/tendermint/types" -) - -// Peer is an autogenerated mock type for the Peer type -type Peer struct { - mock.Mock -} - -// CloseConn provides a mock function with given fields: -func (_m *Peer) CloseConn() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FlushStop provides a mock function with given fields: -func (_m *Peer) FlushStop() { - _m.Called() -} - -// Get provides a mock function with given fields: _a0 -func (_m *Peer) Get(_a0 string) interface{} { - ret := _m.Called(_a0) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(string) interface{}); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - return r0 -} - -// ID provides a mock function with given fields: -func (_m *Peer) ID() types.NodeID { - ret := _m.Called() - - var r0 types.NodeID - if rf, ok := ret.Get(0).(func() types.NodeID); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeID) - } - - return r0 -} - -// IsOutbound provides a mock function with given fields: -func (_m *Peer) IsOutbound() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsPersistent provides a mock function with given fields: -func (_m *Peer) IsPersistent() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsRunning provides a mock function with given fields: -func (_m *Peer) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// NodeInfo provides a mock function with given fields: -func (_m *Peer) NodeInfo() types.NodeInfo { - ret := _m.Called() - - var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func() types.NodeInfo); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeInfo) - } - - return r0 -} - -// OnReset provides a mock function with given fields: -func (_m *Peer) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Peer) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Peer) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Peer) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// RemoteAddr provides a mock function with given fields: -func (_m *Peer) RemoteAddr() net.Addr { - ret := _m.Called() - - var r0 net.Addr - if rf, ok := ret.Get(0).(func() net.Addr); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.Addr) - } - } - - return r0 -} - -// RemoteIP provides a mock function 
with given fields: -func (_m *Peer) RemoteIP() net.IP { - ret := _m.Called() - - var r0 net.IP - if rf, ok := ret.Get(0).(func() net.IP); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.IP) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Peer) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Send provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Set provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Set(_a0 string, _a1 interface{}) { - _m.Called(_a0, _a1) -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Peer) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - -// SocketAddr provides a mock function with given fields: -func (_m *Peer) SocketAddr() *types.NetAddress { - ret := _m.Called() - - var r0 *types.NetAddress - if rf, ok := ret.Get(0).(func() *types.NetAddress); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.NetAddress) - } - } - - return r0 -} - -// Start provides a mock function with given fields: -func (_m *Peer) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Status provides a mock function with given fields: -func (_m *Peer) Status() conn.ConnectionStatus { - ret := _m.Called() - - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(conn.ConnectionStatus) - } - - return r0 -} - -// Stop provides a mock function with given fields: -func (_m *Peer) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// String provides a mock function with given fields: -func (_m *Peer) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// TrySend provides a mock function with given fields: _a0, _a1 -func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Wait provides a mock function with given fields: -func (_m *Peer) Wait() { - _m.Called() -} diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 82bd670cb..eea1de4c5 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -5,7 +5,10 @@ package mocks import ( context "context" + conn "github.com/tendermint/tendermint/internal/p2p/conn" + mock "github.com/stretchr/testify/mock" + p2p "github.com/tendermint/tendermint/internal/p2p" ) @@ -37,6 +40,11 @@ func (_m *Transport) Accept() (p2p.Connection, error) { return r0, r1 } +// AddChannelDescriptors provides a mock function with given fields: _a0 +func (_m *Transport) AddChannelDescriptors(_a0 []*conn.ChannelDescriptor) { + _m.Called(_a0) +} + // Close provides a mock function with given fields: func (_m *Transport) 
Close() error { ret := _m.Called() @@ -90,6 +98,20 @@ func (_m *Transport) Endpoints() []p2p.Endpoint { return r0 } +// Listen provides a mock function with given fields: _a0 +func (_m *Transport) Listen(_a0 p2p.Endpoint) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(p2p.Endpoint) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Protocols provides a mock function with given fields: func (_m *Transport) Protocols() []p2p.Protocol { ret := _m.Called() diff --git a/internal/p2p/netaddress.go b/internal/p2p/netaddress.go deleted file mode 100644 index 6fce3a769..000000000 --- a/internal/p2p/netaddress.go +++ /dev/null @@ -1,11 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "github.com/tendermint/tendermint/types" -) - -type NetAddress = types.NetAddress diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 6e524d492..d8657b774 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -1,25 +1,23 @@ package p2p_test import ( - "context" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/types" ) // Common setup for P2P tests. var ( - ctx = context.Background() chID = p2p.ChannelID(1) - chDesc = p2p.ChannelDescriptor{ - ID: byte(chID), + chDesc = &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &p2ptest.Message{}, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, - MaxSendBytes: 1000, } selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd}) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 1daba3f14..6ee253b3c 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -48,7 +47,7 @@ func (opts *NetworkOptions) setDefaults() { // MakeNetwork creates a test network with the given number of nodes and // connects them to each other. -func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { +func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network { opts.setDefaults() logger := log.TestingLogger() network := &Network{ @@ -58,7 +57,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { } for i := 0; i < opts.NumNodes; i++ { - node := network.MakeNode(t, opts.NodeOpts) + node := network.MakeNode(ctx, t, opts.NodeOpts) network.Nodes[node.NodeID] = node } @@ -137,13 +136,11 @@ func (n *Network) NodeIDs() []types.NodeID { // doing error checks and cleanups. func (n *Network) MakeChannels( t *testing.T, - chDesc p2p.ChannelDescriptor, - messageType proto.Message, - size int, + chDesc *p2p.ChannelDescriptor, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size) + channels[node.NodeID] = node.MakeChannel(t, chDesc) } return channels } @@ -153,13 +150,11 @@ func (n *Network) MakeChannels( // all the channels. 
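
With these helpers, a reactor test builds its fixture from a context plus a single *ChannelDescriptor; the descriptor now carries the proto message type, replacing the separate messageType and size arguments that MakeChannels used to take. Roughly, under the p2ptest package shown here (a sketch that elides the reactor under test):

package p2ptest_test

import (
	"context"
	"testing"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
)

func TestNetworkFixture(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Routers are started under ctx; the helpers register their own cleanup.
	network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4})

	// One descriptor carries the ID, priority, and proto message type.
	chDesc := p2ptest.MakeChannelDesc(p2p.ChannelID(1))
	channels := network.MakeChannels(t, chDesc)

	_ = channels // map[types.NodeID]*p2p.Channel, one per node
}
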
func (n *Network) MakeChannelsNoCleanup( t *testing.T, - chDesc p2p.ChannelDescriptor, - messageType proto.Message, - size int, + chDesc *p2p.ChannelDescriptor, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size) + channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc) } return channels } @@ -226,7 +221,7 @@ type Node struct { // MakeNode creates a new Node configured for the network with a // running peer manager, but does not add it to the existing // network. Callers are responsible for updating peering relationships. -func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { +func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node { privKey := ed25519.GenPrivKey() nodeID := types.NodeIDFromPubKey(privKey.PubKey()) nodeInfo := types.NodeInfo{ @@ -254,10 +249,12 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { privKey, peerManager, []p2p.Transport{transport}, + transport.Endpoints(), p2p.RouterOptions{DialSleep: func(_ context.Context) {}}, ) + require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) t.Cleanup(func() { if router.IsRunning() { @@ -281,11 +278,13 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { // MakeChannel opens a channel, with automatic error handling and cleanup. On // test cleanup, it also checks that the channel is empty, to make sure // all expected messages have been asserted. -func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor, - messageType proto.Message, size int) *p2p.Channel { - channel, err := n.Router.OpenChannel(chDesc, messageType, size) +func (n *Node) MakeChannel( + t *testing.T, + chDesc *p2p.ChannelDescriptor, +) *p2p.Channel { + channel, err := n.Router.OpenChannel(chDesc) require.NoError(t, err) - require.Contains(t, n.Router.NodeInfo().Channels, chDesc.ID) + require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID)) t.Cleanup(func() { RequireEmpty(t, channel) channel.Close() @@ -297,24 +296,21 @@ func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor, // caller must ensure proper cleanup of the channel. func (n *Node) MakeChannelNoCleanup( t *testing.T, - chDesc p2p.ChannelDescriptor, - messageType proto.Message, - size int, + chDesc *p2p.ChannelDescriptor, ) *p2p.Channel { - - channel, err := n.Router.OpenChannel(chDesc, messageType, size) + channel, err := n.Router.OpenChannel(chDesc) require.NoError(t, err) return channel } // MakePeerUpdates opens a peer update subscription, with automatic cleanup. // It checks that all updates have been consumed during cleanup. 
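
MakePeerUpdates' cleanup calls RequireNoUpdates, which (below) now takes a context so that an update arriving while the test is already shutting down does not fail the run: once ctx is canceled, stray updates are ignored. The same guard in isolation, as a runnable illustration:

package main

import (
	"context"
	"fmt"
)

// checkNoUpdates reports an unexpected value only while ctx is live;
// after cancellation, late updates are ignored rather than failed on.
func checkNoUpdates(ctx context.Context, updates <-chan string) error {
	select {
	case u := <-updates:
		if ctx.Err() == nil {
			return fmt.Errorf("unexpected update: %q", u)
		}
		return nil
	case <-ctx.Done():
		return nil
	default:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan string, 1)
	ch <- "late peer update"
	cancel() // simulate test shutdown
	fmt.Println(checkNoUpdates(ctx, ch)) // <nil>: ignored after cancel
}
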
-func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { +func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdates { t.Helper() sub := n.PeerManager.Subscribe() t.Cleanup(func() { t.Helper() - RequireNoUpdates(t, sub) + RequireNoUpdates(ctx, t, sub) sub.Close() }) @@ -333,12 +329,12 @@ func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { return sub } -func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { - return p2p.ChannelDescriptor{ - ID: byte(chID), +func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &Message{}, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, - MaxSendBytes: 1000, } } diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 3598baba0..106063bbd 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -1,6 +1,7 @@ package p2ptest import ( + "context" "testing" "time" @@ -24,6 +25,8 @@ func RequireEmpty(t *testing.T, channels ...*p2p.Channel) { // RequireReceive requires that the given envelope is received on the channel. func RequireReceive(t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { + t.Helper() + timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks defer timer.Stop() @@ -93,11 +96,14 @@ func RequireSendReceive( } // RequireNoUpdates requires that a PeerUpdates subscription is empty. -func RequireNoUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates) { +func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUpdates) { t.Helper() select { case update := <-peerUpdates.Updates(): - require.Fail(t, "unexpected peer updates", "got %v", update) + if ctx.Err() == nil { + require.Fail(t, "unexpected peer updates", "got %v", update) + } + case <-ctx.Done(): default: } } diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go deleted file mode 100644 index 709a1294a..000000000 --- a/internal/p2p/peer.go +++ /dev/null @@ -1,371 +0,0 @@ -package p2p - -import ( - "fmt" - "io" - "net" - "runtime/debug" - "time" - - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -//go:generate ../../scripts/mockery_generate.sh Peer - -const metricsTickerDuration = 10 * time.Second - -// Peer is an interface representing a peer connected on a reactor. -type Peer interface { - service.Service - FlushStop() - - ID() types.NodeID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection - RemoteAddr() net.Addr // remote address of the connection - - IsOutbound() bool // did we dial the peer - IsPersistent() bool // do we redial this peer when we disconnect - - CloseConn() error // close original connection - - NodeInfo() types.NodeInfo // peer's info - Status() tmconn.ConnectionStatus - SocketAddr() *NetAddress // actual address of the socket - - Send(byte, []byte) bool - TrySend(byte, []byte) bool - - Set(string, interface{}) - Get(string) interface{} -} - -//---------------------------------------------------------- - -// peerConn contains the raw connection and its config. 
-type peerConn struct { - outbound bool - persistent bool - conn Connection - ip net.IP // cached RemoteIP() -} - -func newPeerConn(outbound, persistent bool, conn Connection) peerConn { - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - } -} - -// Return the IP from the connection RemoteAddr -func (pc peerConn) RemoteIP() net.IP { - if pc.ip == nil { - pc.ip = pc.conn.RemoteEndpoint().IP - } - return pc.ip -} - -// peer implements Peer. -// -// Before using a peer, you will need to perform a handshake on connection. -type peer struct { - service.BaseService - - // raw peerConn and the multiplex connection - peerConn - - // peer's node info and the channel it knows about - // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo types.NodeInfo - channels []byte - reactors map[byte]Reactor - onPeerError func(Peer, interface{}) - - // User data - Data *cmap.CMap - - metrics *Metrics - metricsTicker *time.Ticker -} - -type PeerOption func(*peer) - -func newPeer( - nodeInfo types.NodeInfo, - pc peerConn, - reactorsByCh map[byte]Reactor, - onPeerError func(Peer, interface{}), - options ...PeerOption, -) *peer { - p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - reactors: reactorsByCh, - onPeerError: onPeerError, - Data: cmap.NewCMap(), - metricsTicker: time.NewTicker(metricsTickerDuration), - metrics: NopMetrics(), - } - - p.BaseService = *service.NewBaseService(nil, "Peer", p) - for _, option := range options { - option(p) - } - - return p -} - -// onError calls the peer error callback. -func (p *peer) onError(err interface{}) { - p.onPeerError(p, err) -} - -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID()) - } - - return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID()) -} - -//--------------------------------------------------- -// Implements service.Service - -// SetLogger implements BaseService. -func (p *peer) SetLogger(l log.Logger) { - p.Logger = l -} - -// OnStart implements BaseService. -func (p *peer) OnStart() error { - if err := p.BaseService.OnStart(); err != nil { - return err - } - - go p.processMessages() - go p.metricsReporter() - - return nil -} - -// processMessages processes messages received from the connection. -func (p *peer) processMessages() { - defer func() { - if r := recover(); r != nil { - p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack())) - p.onError(fmt.Errorf("panic during peer message processing: %v", r)) - } - }() - - for { - chID, msg, err := p.conn.ReceiveMessage() - if err != nil { - p.onError(err) - return - } - reactor, ok := p.reactors[byte(chID)] - if !ok { - p.onError(fmt.Errorf("unknown channel %v", chID)) - return - } - reactor.Receive(byte(chID), p, msg) - } -} - -// FlushStop mimics OnStop but additionally ensures that all successful -// .Send() calls will get flushed before closing the connection. -// NOTE: it is not safe to call this method more than once. -func (p *peer) FlushStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.FlushClose(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -// OnStop implements BaseService. 
-func (p *peer) OnStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.Close(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -//--------------------------------------------------- -// Implements Peer - -// ID returns the peer's ID - the hex encoded hash of its pubkey. -func (p *peer) ID() types.NodeID { - return p.nodeInfo.ID() -} - -// IsOutbound returns true if the connection is outbound, false otherwise. -func (p *peer) IsOutbound() bool { - return p.peerConn.outbound -} - -// IsPersistent returns true if the peer is persitent, false otherwise. -func (p *peer) IsPersistent() bool { - return p.peerConn.persistent -} - -// NodeInfo returns a copy of the peer's NodeInfo. -func (p *peer) NodeInfo() types.NodeInfo { - return p.nodeInfo -} - -// SocketAddr returns the address of the socket. -// For outbound peers, it's the address dialed (after DNS resolution). -// For inbound peers, it's the address returned by the underlying connection -// (not what's reported in the peer's NodeInfo). -func (p *peer) SocketAddr() *NetAddress { - endpoint := p.peerConn.conn.RemoteEndpoint() - return &NetAddress{ - ID: p.ID(), - IP: endpoint.IP, - Port: endpoint.Port, - } -} - -// Status returns the peer's ConnectionStatus. -func (p *peer) Status() tmconn.ConnectionStatus { - return p.conn.Status() -} - -// Send msg bytes to the channel identified by chID byte. Returns false if the -// send queue is full after timeout, specified by MConnection. -func (p *peer) Send(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - // see Switch#Broadcast, where we fetch the list of peers and loop over - // them - while we're looping, one peer may be removed and stopped. - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.SendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// TrySend msg bytes to the channel identified by chID byte. Immediately returns -// false if the send queue is full. -func (p *peer) TrySend(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// Get the data for a given key. -func (p *peer) Get(key string) interface{} { - return p.Data.Get(key) -} - -// Set sets the data for the given key. -func (p *peer) Set(key string, data interface{}) { - p.Data.Set(key, data) -} - -// hasChannel returns true if the peer reported -// knowing about the given chID. -func (p *peer) hasChannel(chID byte) bool { - for _, ch := range p.channels { - if ch == chID { - return true - } - } - // NOTE: probably will want to remove this - // but could be helpful while the feature is new - p.Logger.Debug( - "Unknown channel for peer", - "channel", - chID, - "channels", - p.channels, - ) - return false -} - -// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all. 
-func (p *peer) CloseConn() error { - return p.peerConn.conn.Close() -} - -//--------------------------------------------------- -// methods only used for testing -// TODO: can we remove these? - -// CloseConn closes the underlying connection -func (pc *peerConn) CloseConn() { - pc.conn.Close() -} - -// RemoteAddr returns peer's remote network address. -func (p *peer) RemoteAddr() net.Addr { - endpoint := p.conn.RemoteEndpoint() - return &net.TCPAddr{ - IP: endpoint.IP, - Port: int(endpoint.Port), - } -} - -//--------------------------------------------------- - -func PeerMetrics(metrics *Metrics) PeerOption { - return func(p *peer) { - p.metrics = metrics - } -} - -func (p *peer) metricsReporter() { - for { - select { - case <-p.metricsTicker.C: - status := p.conn.Status() - var sendQueueSize float64 - for _, chStatus := range status.Channels { - sendQueueSize += float64(chStatus.SendQueueSize) - } - - p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) - case <-p.Quit(): - return - } - } -} diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go deleted file mode 100644 index 8d4ad4939..000000000 --- a/internal/p2p/peer_set.go +++ /dev/null @@ -1,149 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key types.NodeID) bool - HasIP(ip net.IP) bool - Get(key types.NodeID) Peer - List() []Peer - Size() int -} - -//----------------------------------------------------------------------------- - -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. -type PeerSet struct { - mtx tmsync.Mutex - lookup map[types.NodeID]*peerSetItem - list []Peer -} - -type peerSetItem struct { - peer Peer - index int -} - -// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. -func NewPeerSet() *PeerSet { - return &PeerSet{ - lookup: make(map[types.NodeID]*peerSetItem), - list: make([]Peer, 0, 256), - } -} - -// Add adds the peer to the PeerSet. -// It returns an error carrying the reason, if the peer is already present. -func (ps *PeerSet) Add(peer Peer) error { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.lookup[peer.ID()] != nil { - return ErrSwitchDuplicatePeerID{peer.ID()} - } - - index := len(ps.list) - // Appending is safe even with other goroutines - // iterating over the ps.list slice. - ps.list = append(ps.list, peer) - ps.lookup[peer.ID()] = &peerSetItem{peer, index} - return nil -} - -// Has returns true if the set contains the peer referred to by this -// peerKey, otherwise false. -func (ps *PeerSet) Has(peerKey types.NodeID) bool { - ps.mtx.Lock() - _, ok := ps.lookup[peerKey] - ps.mtx.Unlock() - return ok -} - -// HasIP returns true if the set contains the peer referred to by this IP -// address, otherwise false. -func (ps *PeerSet) HasIP(peerIP net.IP) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.hasIP(peerIP) -} - -// hasIP does not acquire a lock so it can be used in public methods which -// already lock. -func (ps *PeerSet) hasIP(peerIP net.IP) bool { - for _, item := range ps.lookup { - if item.peer.RemoteIP().Equal(peerIP) { - return true - } - } - - return false -} - -// Get looks up a peer by the provided peerKey. Returns nil if peer is not -// found. 
-func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	item, ok := ps.lookup[peerKey]
-	if ok {
-		return item.peer
-	}
-	return nil
-}
-
-// Remove discards a peer by its key, if the peer was previously memoized.
-// Returns true if the peer was removed, and false if it was not found
-// in the set.
-func (ps *PeerSet) Remove(peer Peer) bool {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	item := ps.lookup[peer.ID()]
-	if item == nil {
-		return false
-	}
-
-	index := item.index
-	// Create a new copy of the list but with one less item.
-	// (we must copy because we'll be mutating the list).
-	newList := make([]Peer, len(ps.list)-1)
-	copy(newList, ps.list)
-	// If it's the last peer, that's an easy special case.
-	if index == len(ps.list)-1 {
-		ps.list = newList
-		delete(ps.lookup, peer.ID())
-		return true
-	}
-
-	// Replace the popped item with the last item in the old list.
-	lastPeer := ps.list[len(ps.list)-1]
-	lastPeerKey := lastPeer.ID()
-	lastPeerItem := ps.lookup[lastPeerKey]
-	newList[index] = lastPeer
-	lastPeerItem.index = index
-	ps.list = newList
-	delete(ps.lookup, peer.ID())
-	return true
-}
-
-// Size returns the number of unique items in the peerSet.
-func (ps *PeerSet) Size() int {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	return len(ps.list)
-}
-
-// List returns the threadsafe list of peers.
-func (ps *PeerSet) List() []Peer {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	return ps.list
-}
diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go
deleted file mode 100644
index 3e2397d2d..000000000
--- a/internal/p2p/peer_set_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package p2p
-
-import (
-	"net"
-	"sync"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-// mockPeer for testing the PeerSet
-type mockPeer struct {
-	service.BaseService
-	ip net.IP
-	id types.NodeID
-}
-
-func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
-func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
-func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
-func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} }
-func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
-func (mp *mockPeer) ID() types.NodeID { return mp.id }
-func (mp *mockPeer) IsOutbound() bool { return false }
-func (mp *mockPeer) IsPersistent() bool { return true }
-func (mp *mockPeer) Get(s string) interface{} { return s }
-func (mp *mockPeer) Set(string, interface{}) {}
-func (mp *mockPeer) RemoteIP() net.IP { return mp.ip }
-func (mp *mockPeer) SocketAddr() *NetAddress { return nil }
-func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
-func (mp *mockPeer) CloseConn() error { return nil }
-
-// Returns a mock peer
-func newMockPeer(ip net.IP) *mockPeer {
-	if ip == nil {
-		ip = net.IP{127, 0, 0, 1}
-	}
-	nodeKey := types.GenNodeKey()
-	return &mockPeer{
-		ip: ip,
-		id: nodeKey.ID,
-	}
-}
-
-func TestPeerSetAddRemoveOne(t *testing.T) {
-	t.Parallel()
-
-	peerSet := NewPeerSet()
-
-	var peerList []Peer
-	for i := 0; i < 5; i++ {
-		p := newMockPeer(net.IP{127, 0, 0, byte(i)})
-		if err := peerSet.Add(p); err != nil {
-			t.Error(err)
-		}
-		peerList = append(peerList, p)
-	}
-
-	n := len(peerList)
-	// 1.
Test removing from the front - for i, peerAtFront := range peerList { - removed := peerSet.Remove(peerAtFront) - assert.True(t, removed) - wantSize := n - i - 1 - for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) - assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) - // Test the route of removing the now non-existent element - removed := peerSet.Remove(peerAtFront) - assert.False(t, removed) - } - } - - // 2. Next we are testing removing the peer at the end - // a) Replenish the peerSet - for _, peer := range peerList { - if err := peerSet.Add(peer); err != nil { - t.Error(err) - } - } - - // b) In reverse, remove each element - for i := n - 1; i >= 0; i-- { - peerAtEnd := peerList[i] - removed := peerSet.Remove(peerAtEnd) - assert.True(t, removed) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) - assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) - } -} - -func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - - peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { - peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(peer); err != nil { - t.Errorf("failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - removed := peerSet.Remove(peer) - assert.True(t, removed) - if peerSet.Has(peer.ID()) { - t.Errorf("failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("failed to remove peer and decrement size") - } - } -} - -func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - peer := newMockPeer(nil) - - n := 20 - errsChan := make(chan error) - // Add the same asynchronously to test the - // concurrent guarantees of our APIs, and - // our expectation in the end is that only - // one addition succeeded, but the rest are - // instances of ErrSwitchDuplicatePeer. - for i := 0; i < n; i++ { - go func() { - errsChan <- peerSet.Add(peer) - }() - } - - // Now collect and tally the results - errsTally := make(map[string]int) - for i := 0; i < n; i++ { - err := <-errsChan - - switch err.(type) { - case ErrSwitchDuplicatePeerID: - errsTally["duplicateID"]++ - default: - errsTally["other"]++ - } - } - - // Our next procedure is to ensure that only one addition - // succeeded and that the rest are each ErrSwitchDuplicatePeer. - wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] - assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") - - wantNilErrCount, gotNilErrCount := 1, errsTally["other"] - assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") -} - -func TestPeerSetGet(t *testing.T) { - t.Parallel() - - var ( - peerSet = NewPeerSet() - peer = newMockPeer(nil) - ) - - assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") - - if err := peerSet.Add(peer); err != nil { - t.Fatalf("Failed to add new peer: %v", err) - } - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - // Add them asynchronously to test the - // concurrent guarantees of our APIs. 
- wg.Add(1) - go func(i int) { - defer wg.Done() - have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) - }(i) - } - wg.Wait() -} diff --git a/internal/p2p/peer_test.go b/internal/p2p/peer_test.go deleted file mode 100644 index dfe7bc798..000000000 --- a/internal/p2p/peer_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - golog "log" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" -) - -func TestPeerBasic(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.IsRunning()) - assert.True(p.IsOutbound()) - assert.False(p.IsPersistent()) - p.persistent = true - assert.True(p.IsPersistent()) - assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String()) - assert.Equal(rp.ID(), p.ID()) -} - -func TestPeerSend(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - config := cfg - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.Send(testCh, []byte("Asylum"))) -} - -func createOutboundPeerAndPerformHandshake( - addr *NetAddress, - config *config.P2PConfig, - mConfig tmconn.MConnConfig, -) (*peer, error) { - chDescs := []*tmconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - pk := ed25519.GenPrivKey() - ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer") - transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{}) - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pc, err := testOutboundPeerConn(transport, addr, config, false, pk) - if err != nil { - return nil, err - } - peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk) - if err != nil { - return nil, err - } - - p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {}) - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p, nil -} - -func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func testOutboundPeerConn( - transport *MConnTransport, - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - var pc peerConn - conn, err := testDial(addr, config) - if err != nil { - return pc, 
fmt.Errorf("error creating peer: %w", err) - } - - pc, err = testPeerConn(transport, conn, true, persistent) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return pc, fmt.Errorf("%v: %w", cerr.Error(), err) - } - return pc, err - } - - return pc, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - Network string - addr *NetAddress - channels bytes.HexBytes - listenAddr string - listener net.Listener -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept() -} - -func (rp *remotePeer) Stop() { - rp.listener.Close() -} - -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conn, err := addr.DialTimeout(1 * time.Second) - if err != nil { - return nil, err - } - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - return nil, err - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - return nil, err - } - return conn, err -} - -func (rp *remotePeer) accept() { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conns := []net.Conn{} - - for { - conn, err := rp.listener.Accept() - if err != nil { - golog.Printf("Failed to accept conn: %+v", err) - for _, conn := range conns { - _ = conn.Close() - } - return - } - - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - golog.Printf("Failed to create a peer: %+v", err) - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - golog.Printf("Failed to handshake a peer: %+v", err) - } - - conns = append(conns, conn) - } -} - -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } - if rp.Network != "" { - ni.Network = rp.Network - } - return ni -} diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 1e9afb38b..8c37cc1ff 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -180,7 +180,7 @@ func (o *PeerManagerOptions) Validate() error { if o.MaxPeers > 0 { if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers { - return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", // nolint + return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers) } } @@ -190,7 +190,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTime without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTime { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", // nolint + return fmt.Errorf("MinRetryTime %v 
is greater than MaxRetryTime %v", o.MinRetryTime, o.MaxRetryTime) } } @@ -200,7 +200,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTimePersistent without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTimePersistent { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", // nolint + return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", o.MinRetryTime, o.MaxRetryTimePersistent) } } @@ -532,6 +532,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error { if !ok { return nil // Assume the address has been removed, ignore. } + addressInfo.LastDialFailure = time.Now().UTC() addressInfo.DialFailures++ if err := m.store.Set(peer); err != nil { @@ -602,6 +603,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { addressInfo.LastDialSuccess = now // If not found, assume address has been removed. } + if err := m.store.Set(peer); err != nil { return err } @@ -660,6 +662,11 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { peer = m.newPeerInfo(peerID) } + // reset this to avoid penalizing peers for their past transgressions + for _, addr := range peer.AddressInfo { + addr.DialFailures = 0 + } + // If all connections slots are full, but we allow upgrades (and we checked // above that we have upgrade capacity), then we can look for a lower-scored // peer to replace and if found accept the connection anyway and evict it. @@ -1287,15 +1294,23 @@ func (p *peerInfo) Score() PeerScore { return PeerScorePersistent } - if p.MutableScore <= 0 { + score := p.MutableScore + + for _, addr := range p.AddressInfo { + // DialFailures is reset when dials succeed, so this + // is either the number of dial failures or 0. + score -= int64(addr.DialFailures) + } + + if score <= 0 { return 0 } - if p.MutableScore >= math.MaxUint8 { + if score >= math.MaxUint8 { return PeerScore(math.MaxUint8) } - return PeerScore(p.MutableScore) + return PeerScore(score) } // Validate validates the peer info. diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index 0825af948..edb5fc6fc 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -6,9 +6,10 @@ import ( "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestPeerScoring(t *testing.T) { diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 69c798d2d..28efe63dd 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -273,6 +273,9 @@ func TestPeerManager_Add(t *testing.T) { } func TestPeerManager_DialNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -296,6 +299,9 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} options := p2p.PeerManagerOptions{ @@ -311,7 +317,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { // Do five dial retries (six dials total). 
The retry time should double for // each failure. At the forth retry, MaxRetryTime should kick in. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + ctx, cancel = context.WithTimeout(ctx, 5*time.Second) defer cancel() for i := 0; i <= 5; i++ { @@ -342,6 +348,9 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -356,7 +365,7 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { }() // This will block until peer is added above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err := peerManager.DialNext(ctx) require.NoError(t, err) @@ -364,6 +373,9 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, }) @@ -395,7 +407,7 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { }() // This should make b available for dialing (not a, retries are disabled). - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -403,6 +415,9 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + options := p2p.PeerManagerOptions{MinRetryTime: 200 * time.Millisecond} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options) require.NoError(t, err) @@ -421,7 +436,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { // The retry timer should unblock DialNext and make a available again after // the retry time passes. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -430,6 +445,9 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -450,7 +468,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { peerManager.Disconnected(a.NodeID) }() - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -1289,6 +1307,9 @@ func TestPeerManager_Ready(t *testing.T) { // See TryEvictNext for most tests, this just tests blocking behavior. 
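// The Score change in peermanager.go above boils down to: start from
// MutableScore, subtract the accumulated dial failures across the peer's
// addresses, then clamp into [0, math.MaxUint8]. A standalone sketch of
// that arithmetic (persistent peers short-circuit to PeerScorePersistent
// before this point; 255 stands in for math.MaxUint8):
func scoreSketch(mutableScore int64, dialFailures []int64) uint8 {
	score := mutableScore
	for _, failures := range dialFailures {
		// Failure counts are reset when a dial succeeds or a connection
		// is accepted, so only currently failing addresses weigh in.
		score -= failures
	}
	if score <= 0 {
		return 0
	}
	if score >= 255 {
		return 255
	}
	return uint8(score)
}
// Example: MutableScore 10 with per-address failures {2, 3} scores 5;
// MutableScore 3 with failures {5} clamps to 0.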
func TestPeerManager_EvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1322,6 +1343,9 @@ func TestPeerManager_EvictNext(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1340,7 +1364,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { }() // This will block until peer errors above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1348,6 +1372,9 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1378,7 +1405,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1386,6 +1413,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1410,7 +1440,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go deleted file mode 100644 index 6c5f78663..000000000 --- a/internal/p2p/pex/addrbook.go +++ /dev/null @@ -1,948 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "encoding/binary" - "fmt" - "hash" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/minio/highwayhash" - "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. 
-// TODO: break this up? -type AddrBook interface { - service.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - AddPrivateIDs([]string) - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? - NeedMoreAddrs() bool - // Is Address Book Empty? Answer should not depend on being in your own - // address book, or private peers - Empty() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(types.NodeID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list - // Add bad peers back to addrBook - ReinstateBadPeers() - - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - Size() int - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. -type addrBook struct { - service.BaseService - - // accessed concurrently - mtx tmsync.Mutex - ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - // immutable after creation - filePath string - key string // random prefix for bucket placement - routabilityStrict bool - hasher hash.Hash64 - - wg sync.WaitGroup -} - -func mustNewHasher() hash.Hash64 { - key := crypto.CRandBytes(highwayhash.Size) - hasher, err := highwayhash.New64(key) - if err != nil { - panic(err) - } - return hasher -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { - am := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *service.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. -// When modifying this, don't forget to update loadFromFile() -func (a *addrBook) init() { - a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.bucketsNew { - a.bucketsNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.bucketsOld { - a.bucketsOld[i] = make(map[string]*knownAddress) - } - a.hasher = mustNewHasher() -} - -// OnStart implements Service. -func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } - a.loadFromFile(a.filePath) - - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. 
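// Note on the ordering that follows: Add(1) runs on the calling goroutine
// before `go a.saveRoutine()`, which is the standard sync.WaitGroup idiom.
// A caller that invokes Wait immediately after Start therefore cannot
// observe a zero counter while the save goroutine is still being launched.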
-	a.wg.Add(1)
-	go a.saveRoutine()
-
-	return nil
-}
-
-// OnStop implements Service.
-func (a *addrBook) OnStop() {
-	a.BaseService.OnStop()
-}
-
-func (a *addrBook) Wait() {
-	a.wg.Wait()
-}
-
-func (a *addrBook) FilePath() string {
-	return a.filePath
-}
-
-//-------------------------------------------------------
-
-// AddOurAddress adds one of our addresses.
-func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	a.Logger.Info("Add our address to book", "addr", addr)
-	a.ourAddrs[addr.String()] = struct{}{}
-}
-
-// OurAddress returns true if it is our address.
-func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	_, ok := a.ourAddrs[addr.String()]
-	return ok
-}
-
-func (a *addrBook) AddPrivateIDs(ids []string) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	for _, id := range ids {
-		a.privateIDs[types.NodeID(id)] = struct{}{}
-	}
-}
-
-// AddAddress implements AddrBook
-// Add address to a "new" bucket. If it's already in one, only add it probabilistically.
-// Returns error if the addr is non-routable. Does not add self.
-// NOTE: addr must not be nil
-func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	return a.addAddress(addr, src)
-}
-
-// RemoveAddress implements AddrBook - removes the address from the book.
-func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	a.removeAddress(addr)
-}
-
-// IsGood returns true if the peer was ever marked as good and hasn't
-// done anything wrong since then.
-func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	return a.addrLookup[addr.ID].isOld()
-}
-
-// IsBanned returns true if the peer is currently banned
-func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	_, ok := a.badPeers[addr.ID]
-	a.mtx.Unlock()
-
-	return ok
-}
-
-// HasAddress returns true if the address is in the book.
-func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	ka := a.addrLookup[addr.ID]
-	return ka != nil
-}
-
-// NeedMoreAddrs implements AddrBook - returns true if there are not enough addresses in the book.
-func (a *addrBook) NeedMoreAddrs() bool {
-	return a.Size() < needAddressThreshold
-}
-
-// Empty implements AddrBook - returns true if there are no addresses in the address book.
-// Does not count the peer appearing in its own address book, or private peers.
-func (a *addrBook) Empty() bool {
-	return a.Size() == 0
-}
-
-// PickAddress implements AddrBook. It picks an address to connect to.
-// The address is picked randomly from an old or new bucket according
-// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
-// and determines how biased we are to pick an address from a new bucket.
-// PickAddress returns nil if the AddrBook is empty or if we try to pick
-// from an empty bucket.
-// nolint:gosec // G404: Use of weak random number generator -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) - - // pick a random peer from a random bucket - var bucket map[string]*knownAddress - pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation - if (pickFromOldBucket && a.nOld == 0) || - (!pickFromOldBucket && a.nNew == 0) { - return nil - } - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))] - } else { - bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))] - } - } - // pick a random index and loop over the map to return that index - randIndex := mrand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - return nil -} - -// MarkGood implements AddrBook - it marks the peer as good and -// moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[id] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } - } -} - -// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. -func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Kicks address out from book, places -// the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.addBadPeer(addr, banTime) { - a.removeAddress(addr) - } -} - -// ReinstateBadPeers removes bad peers from ban list and places them into a new -// bucket. -func (a *addrBook) ReinstateBadPeers() { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, ka := range a.badPeers { - if ka.isBanned() { - continue - } - - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } - - if err := a.addToNewBucket(ka, bucket); err != nil { - a.Logger.Error("Error adding peer to new bucket", "err", err) - } - delete(a.badPeers, ka.ID()) - - a.Logger.Info("Reinstated address", "addr", ka.Addr) - } -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. 
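// Worked example for PickAddress above: with nOld = 9, nNew = 16 and
// biasTowardsNewAddrs = 50, oldCorrelation = sqrt(9)*50 = 150 and
// newCorrelation = sqrt(16)*50 = 200, so an old-bucket address is chosen
// with probability 150/350, about 0.43. A sketch of just that weighting
// (assumes `import "math"`; the function name is illustrative):
func pickOldProbability(nOld, nNew, bias int) float64 {
	oldCorrelation := math.Sqrt(float64(nOld)) * (100.0 - float64(bias))
	newCorrelation := math.Sqrt(float64(nNew)) * float64(bias)
	if oldCorrelation+newCorrelation == 0 {
		return 0
	}
	// Matches the original condition: pick old iff (new+old)*rand() < old.
	return oldCorrelation / (oldCorrelation + newCorrelation)
}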
-func (a *addrBook) GetSelection() []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, - // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) - i := 0 - for _, ka := range a.addrLookup { - allAddr[i] = ka.Addr - i++ - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - // nolint:gosec // G404: Use of weak random number generator - j := mrand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -func percentageOfNum(p, n int) int { - return int(math.Round((float64(p) / float64(100)) * float64(n))) -} - -// GetSelectionWithBias implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -// -// Each address is picked randomly from an old or new bucket according to the -// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to -// that range) and determines how biased we are to pick an address from a new -// bucket. -func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // number of new addresses that, if possible, should be in the beginning of the selection - // if there are no enough old addrs, will choose new addr instead. - numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld) - selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd) - selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...) - return selection -} - -//------------------------------------------------ - -// Size returns the number of addresses in the book. -func (a *addrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.size() -} - -func (a *addrBook) size() int { - return a.nNew + a.nOld -} - -//---------------------------------------------------------- - -// Save persists the address book to disk. 
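// GetSelection and GetSelectionWithBias above lean on a partial
// Fisher-Yates shuffle: to draw k random items from n, it suffices to run
// the first k swap steps and keep the prefix. A generic sketch of the
// technique (assumes `import mrand "math/rand"`, the same non-cryptographic
// source the originals use):
func shufflePrefix(n, k int, swap func(i, j int)) {
	for i := 0; i < k; i++ {
		// Pick uniformly from the not-yet-fixed suffix [i, n).
		j := mrand.Intn(n-i) + i
		swap(i, j)
	}
}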
-func (a *addrBook) Save() { - a.saveToFile(a.filePath) // thread safe -} - -func (a *addrBook) saveRoutine() { - defer a.wg.Done() - - saveFileTicker := time.NewTicker(dumpAddressInterval) -out: - for { - select { - case <-saveFileTicker.C: - a.saveToFile(a.filePath) - case <-a.Quit(): - break out - } - } - saveFileTicker.Stop() - a.saveToFile(a.filePath) -} - -//---------------------------------------------------------- - -func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { - switch bucketType { - case bucketTypeNew: - return a.bucketsNew[bucketIdx] - case bucketTypeOld: - return a.bucketsOld[bucketIdx] - default: - panic("Invalid bucket type") - } -} - -// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full. -// NOTE: currently it always returns true. -func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { - // Consistency check to ensure we don't add an already known address - if ka.isOld() { - return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx} - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return nil - } - - // Enforce max addresses. - if len(bucket) > newBucketSize { - a.Logger.Info("new bucket is full, expiring new") - a.expireNew(bucketIdx) - } - - // Add to bucket. - bucket[addrStr] = ka - // increment nNew if the peer doesnt already exist in a bucket - if ka.addBucketRef(bucketIdx) == 1 { - a.nNew++ - } - - // Add it to addrLookup - a.addrLookup[ka.ID()] = ka - return nil -} - -// Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full. -func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { - // Sanity check - if ka.isNew() { - a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka)) - return false - } - if len(ka.Buckets) != 0 { - a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka)) - return false - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeOld, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return true - } - - // Enforce max addresses. - if len(bucket) > oldBucketSize { - return false - } - - // Add to bucket. 
- bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[ka.ID()] = ka - - return true -} - -func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) - } -} - -func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) -} - -//---------------------------------------------------------- - -func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -// adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { - if addr == nil || src == nil { - return ErrAddrBookNilAddr{addr, src} - } - - if err := addr.Valid(); err != nil { - return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} - } - - if _, ok := a.badPeers[addr.ID]; ok { - return ErrAddressBanned{addr} - } - - if _, ok := a.privateIDs[addr.ID]; ok { - return ErrAddrBookPrivate{addr} - } - - if _, ok := a.privateIDs[src.ID]; ok { - return ErrAddrBookPrivateSrc{src} - } - - // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. - if _, ok := a.ourAddrs[addr.String()]; ok { - return ErrAddrBookSelf{addr} - } - - if a.routabilityStrict && !addr.Routable() { - return ErrAddrBookNonRoutable{addr} - } - - ka := a.addrLookup[addr.ID] - if ka != nil { - // If its already old and the address ID's are the same, ignore it. - // Thereby avoiding issues with a node on the network attempting to change - // the IP of a known node ID. (Which could yield an eclipse attack on the node) - if ka.isOld() && ka.Addr.ID == addr.ID { - return nil - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return nil - } - // The more entries we have, the less likely we are to add more. 
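// The factor computed just below makes repeat additions increasingly
// unlikely: the address is re-added only when mrand.Int31n(factor) == 0,
// i.e. with probability 1/(2*len(ka.Buckets)). One existing bucket
// reference gives a 1/2 chance, two give 1/4, and the earlier
// maxNewBucketsPerAddress check bounds the process outright.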
- factor := int32(2 * len(ka.Buckets)) - // nolint:gosec // G404: Use of weak random number generator - if mrand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } - return a.addToNewBucket(ka, bucket) -} - -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { - var buckets []map[string]*knownAddress - switch bucketType { - case bucketTypeNew: - buckets = a.bucketsNew - case bucketTypeOld: - buckets = a.bucketsOld - default: - panic("unexpected bucketType") - } - total := 0 - for _, bucket := range buckets { - total += len(bucket) - } - addresses := make([]*knownAddress, 0, total) - for _, bucket := range buckets { - for _, ka := range bucket { - addresses = append(addresses, ka) - } - } - selection := make([]*p2p.NetAddress, 0, num) - chosenSet := make(map[string]bool, num) - rand := tmrand.NewRand() - rand.Shuffle(total, func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - for _, addr := range addresses { - if chosenSet[addr.Addr.String()] { - continue - } - chosenSet[addr.Addr.String()] = true - selection = append(selection, addr.Addr) - if len(selection) >= num { - return selection - } - } - return selection -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) error { - // Sanity check - if ka.isOld() { - a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil - } - if len(ka.Buckets) == 0 { - a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } - if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { - a.Logger.Error("Error adding peer to old bucket", "err", err) - } - - // Finally, add our ka to old bucket again. 
- added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } - return nil -} - -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) -} - -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { - // check it exists in addrbook - ka := a.addrLookup[addr.ID] - // check address is not already there - if ka == nil { - return false - } - - if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { - // add to bad peer list - ka.ban(banTime) - a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) - } - return true -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil -} - -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. 
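// Concrete group keys for the rules in the comment above (implemented by
// groupKeyFor below), assuming routable inputs under routabilityStrict:
// IPv4 1.2.3.4 falls in the /16 group "1.2.0.0"; generic IPv6 uses a /32
// group; he.net space (2001:470::/32) uses /36. A sketch of the IPv4 case
// only (assumes `import "net"`):
func ipv4GroupKey(ip net.IP) string {
	// Mask to /16, e.g. 1.2.3.4 -> "1.2.0.0".
	return ip.Mask(net.CIDRMask(16, 32)).String()
}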
-func (a *addrBook) groupKey(na *p2p.NetAddress) string { - return groupKeyFor(na, a.routabilityStrict) -} - -func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { - if routabilityStrict && na.Local() { - return "local" - } - if routabilityStrict && !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return na.IP.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := na.IP[12:16] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC3964() { - ip := na.IP[2:6] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.OnionCatTor() { - // group is keyed off the first 4 bits of the actual onion key. - return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1)) - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. - bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - ipv6Mask := net.CIDRMask(bits, 128) - return na.IP.Mask(ipv6Mask).String() -} - -func (a *addrBook) hash(b []byte) ([]byte, error) { - a.hasher.Reset() - a.hasher.Write(b) - return a.hasher.Sum(nil), nil -} diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go deleted file mode 100644 index 3d21314ad..000000000 --- a/internal/p2p/pex/addrbook_test.go +++ /dev/null @@ -1,777 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math" - mrand "math/rand" - "net" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -// FIXME These tests should not rely on .(*addrBook) assertions - -func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - addr := book.PickAddress(50) - assert.Nil(t, addr, "expected no address") - - randAddrs := randNetAddressPairs(t, 1) - addrSrc := randAddrs[0] - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - - // pick an address when we only have new address - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(100) - assert.NotNil(t, addr, "expected an address") - - // pick an address when we only have old address - book.MarkGood(addrSrc.addr.ID) - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - - // in this case, nNew==0 but we biased 100% to new, so we return nil - addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") -} - -func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - 
book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err := book.Start() - require.NoError(t, err) - - assert.True(t, book.Empty()) - - // 100 addresses - randAddrs := randNetAddressPairs(t, 100) - - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.Start() - require.NoError(t, err) - - assert.Equal(t, 100, book.Size()) -} - -func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - err := book.AddAddress(addr, src) - require.NoError(t, err) - - ka := book.HasAddress(addr) - assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) - } -} - -func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // Attempt all addresses. - for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr.ID) - } - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection could not be bigger than the book") - } - - selection = book.GetSelectionWithBias(30) - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection with bias could not be bigger than the book") - } - - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") -} - -func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - - book.SetLogger(log.TestingLogger()) - - randAddrs := randNetAddressPairs(t, 100) - - differentSrc := randIPv4Address(t) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, differentSrc) // different src - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) -} - -type netAddressPair struct { - addr *p2p.NetAddress - src *p2p.NetAddress -} - -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) - for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} - } - return randAddrs -} - -func randIPv4Address(t *testing.T) *p2p.NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - mrand.Intn(254)+1, - mrand.Intn(255), - mrand.Intn(255), - mrand.Intn(255), - ) - port := mrand.Intn(65535-1) + 1 - id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) - idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port)) - addr, err := types.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") - 
if addr.Routable() { - return addr - } - } -} - -func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - assert.Equal(t, 1, book.Size()) - - book.RemoveAddress(addr) - assert.Equal(t, 0, book.Size()) - - nonExistingAddr := randIPv4Address(t) - book.RemoveAddress(nonExistingAddr) - assert.Equal(t, 0, book.Size()) -} - -func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { - // create a book with 10 addresses, 1 good/old and 9 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book) -} - -func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) { - // create a book with 10 addresses, 9 good/old and 1 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book) -} - -func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) { - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.Nil(t, addrs) -} - -func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - assert.Empty(t, book.GetSelection()) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.Equal(t, 1, len(book.GetSelection())) - assert.Equal(t, addr, book.GetSelection()[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection := book.GetSelection() - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Errorf("selection %v could not be bigger than the book", selection) - } -} - -func TestAddrBookGetSelectionWithBias(t *testing.T) { - const biasTowardsNewAddrs = 30 - - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - selection := book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Empty(t, selection) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) - assert.Equal(t, addr, selection[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - 
addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Fatalf("selection %v could not be bigger than the book", selection) - } - - // 4) mark 80% of the addresses as good - randAddrsLen := len(randAddrs) - for i, addrSrc := range randAddrs { - if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr.ID) - } - } - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - - // check that ~70% of addresses returned are good - good := 0 - for _, addr := range selection { - if book.IsGood(addr) { - good++ - } - } - - got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs - - // compute some slack to protect against small differences due to rounding: - slack := int(math.Round(float64(100) / float64(len(selection)))) - if got > expected+slack { - t.Fatalf( - "got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } - if got < expected-slack { - t.Fatalf( - "got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } -} - -func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.True(t, book.HasAddress(addr)) - - book.RemoveAddress(addr) - - assert.False(t, book.HasAddress(addr)) -} - -func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { - t.Helper() - addrs := make([]*p2p.NetAddress, numAddrs) - for i := 0; i < numAddrs; i++ { - addrs[i] = randIPv4Address(t) - } - - private := make([]string, numAddrs) - for i, addr := range addrs { - private[i] = string(addr.ID) - } - return addrs, private -} - -func TestBanBadPeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - _ = book.AddAddress(addr, addr) - - book.MarkBad(addr, 1*time.Second) - // addr should not reachable - assert.False(t, book.HasAddress(addr)) - assert.True(t, book.IsBanned(addr)) - - err := book.AddAddress(addr, addr) - // book should not add address from the blacklist - assert.Error(t, err) - - time.Sleep(1 * time.Second) - book.ReinstateBadPeers() - // address should be reinstated in the new bucket - assert.EqualValues(t, 1, book.Size()) - assert.True(t, book.HasAddress(addr)) - assert.False(t, book.IsGood(addr)) -} - -func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - // Check that empty book is empty - require.True(t, book.Empty()) - // Check that book with our address is empty - book.AddOurAddress(randIPv4Address(t)) - require.True(t, book.Empty()) - // Check that book with private addrs is empty - _, privateIds := testCreatePrivateAddrs(t, 5) - book.AddPrivateIDs(privateIds) - require.True(t, book.Empty()) - - // Check that book with address is not empty - err := book.AddAddress(randIPv4Address(t), randIPv4Address(t)) - require.NoError(t, err) - require.False(t, book.Empty()) -} - -func TestPrivatePeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addrs, private := testCreatePrivateAddrs(t, 10) - 
book.AddPrivateIDs(private) - - // private addrs must not be added - for _, addr := range addrs { - err := book.AddAddress(addr, addr) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivate) - assert.True(t, ok) - } - } - - // addrs coming from private peers must not be added - err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivateSrc) - assert.True(t, ok) - } -} - -func testAddrBookAddressSelection(t *testing.T, bookSize int) { - // generate all combinations of old (m) and new addresses - for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { - nBookNew := bookSize - nBookOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) - - // create book and get selection - book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) - nAddrs := len(addrs) - assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr) - - // check there's no nil addresses - for _, addr := range addrs { - if addr == nil { - t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs) - } - } - - // XXX: shadowing - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - - // Given: - // n - num new addrs, m - num old addrs - // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, max(k,r-m)), aka expNew - // j=min(m, r-i), aka expOld - // - // We expect this layout: - // indices: 0...i-1 i...i+j-1 - // addresses: N0..Ni-1 O0..Oj-1 - // - // There is at least one partition and at most three. - var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld)) - expOld = tmmath.MinInt(nOld, nAddrs-expNew) - ) - - // Verify that the number of old and new addresses are as expected - if nNew != expNew { - t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) - } - if nOld != expOld { - t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) - } - - // Verify that the order of addresses is as expected - // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) - - // Build a list with the expected lengths of partitions and another with the expected types, e.g.: - // expSeqLens = [10, 22], expSeqTypes = [1, 2] - // means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses. 
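To make the expected-partition arithmetic in the comment above concrete, here is a small self-contained sketch (editor's illustration: the numbers are invented, minInt/maxInt/percentageOfNum stand in for the tmmath helpers the test uses, and the book counts are used as stand-ins for the counts observed in the selection):

// Editor's sketch: the expected-partition arithmetic from the comment
// above, evaluated with illustrative numbers.
package main

import "fmt"

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// percentageOfNum returns p% of n, truncated toward zero.
func percentageOfNum(p, n int) int {
	return int((float64(p) / 100) * float64(n))
}

func main() {
	const (
		bias     = 30 // biasToSelectNewPeers
		nAddrs   = 32 // r: size of the returned selection
		nBookOld = 20 // m: old addresses in the book
		nBookNew = 80 // n: new addresses in the book
	)
	k := percentageOfNum(bias, nAddrs)                     // 9
	expNew := minInt(nBookNew, maxInt(k, nAddrs-nBookOld)) // min(80, max(9, 12)) = 12
	expOld := minInt(nBookOld, nAddrs-expNew)              // min(20, 20) = 20

	// Layout: expNew new addresses followed by expOld old ones,
	// and 12 + 20 = 32 = nAddrs, so the two partitions cover the selection.
	fmt.Println(k, expNew, expOld)
}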
- var expSeqLens []int - var expSeqTypes []int - - switch { - case expOld == 0: // all new addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{1} - case expNew == 0: // all old addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{2} - case nAddrs-expNew-expOld == 0: // new addresses, old addresses - expSeqLens = []int{expNew, expOld} - expSeqTypes = []int{1, 2} - } - - assert.Equal(t, expSeqLens, seqLens, - "%s - expected sequence lengths of old/new %v, got %v", - dbgStr, expSeqLens, seqLens) - assert.Equal(t, expSeqTypes, seqTypes, - "%s - expected sequence types (1-new, 2-old) was %v, got %v", - dbgStr, expSeqTypes, seqTypes) - } -} - -func TestMultipleAddrBookAddressSelection(t *testing.T) { - // test books with smaller size, < N - const N = 32 - for bookSize := 1; bookSize < N; bookSize++ { - testAddrBookAddressSelection(t, bookSize) - } - - // Test for two books with sizes from following ranges - ranges := [...][]int{{33, 100}, {100, 175}} - bookSizes := make([]int, 0, len(ranges)) - for _, r := range ranges { - bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0]) - } - t.Logf("Testing address selection for the following book sizes %v\n", bookSizes) - for _, bookSize := range bookSizes { - testAddrBookAddressSelection(t, bookSize) - } -} - -func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // This test creates adds a peer to the address book and marks it good - // It then attempts to override the peer's IP, by adding a peer with the same ID - // but different IP. We distinguish the IP's by "RealIP" and "OverrideAttemptIP" - peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5" - peerRealIP := "1.1.1.1:26656" - peerOverrideAttemptIP := "2.2.2.2:26656" - SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656" - - // There is a chance that AddAddress will ignore the new peer its given. - // So we repeat trying to override the peer several times, - // to ensure we aren't in a case that got probabilistically ignored - numOverrideAttempts := 10 - - peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP) - require.Nil(t, err) - - peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) - require.Nil(t, err) - - src, err := types.NewNetAddressString(SrcAddr) - require.Nil(t, err) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - book.MarkAttempt(peerRealAddr) - book.MarkGood(peerRealAddr.ID) - - // Double check that adding a peer again doesn't error - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - - // Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2) - // This should just be ignored, and not error. - for i := 0; i < numOverrideAttempts; i++ { - err = book.AddAddress(peerOverrideAttemptAddr, src) - require.Nil(t, err) - } - // Now check that the IP was not overridden. - // This is done by sampling several peers from addr book - // and ensuring they all have the correct IP. - // In the expected functionality, this test should only have 1 Peer, hence will pass. - for i := 0; i < numOverrideAttempts; i++ { - selection := book.GetSelection() - for _, addr := range selection { - require.Equal(t, addr.IP, peerRealAddr.IP) - } - } -} - -func TestAddrBookGroupKey(t *testing.T) { - // non-strict routability - testCases := []struct { - name string - ip string - expKey string - }{ - // IPv4 normal. 
- {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, - {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, - {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, - - // IPv6/IPv4 translations. - {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, - {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, - {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, - {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, - - // Tor. - {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, - {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, - {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, - - // IPv6 normal. - {"ipv6 normal", "2602:100::1", "2602:100::"}, - {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, - {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, - {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) - assert.Equal(t, tc.expKey, key, "#%d", i) - } - - // strict routability - testCases = []struct { - name string - ip string - expKey string - }{ - // Local addresses. - {"ipv4 localhost", "127.0.0.1", "local"}, - {"ipv6 localhost", "::1", "local"}, - {"ipv4 zero", "0.0.0.0", "local"}, - {"ipv4 first octet zero", "0.1.2.3", "local"}, - - // Unroutable addresses. - {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, - {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, - {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, - {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, - {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, - {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, - {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, - {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, - {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) - assert.Equal(t, tc.expKey, key, "#%d", i) - } -} - -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - assert.Equal(t, m, nOld, "old addresses") - assert.Equal(t, n, nNew, "new addresses") -} - -func createTempFileName(t *testing.T, prefix string) string { - t.Helper() - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - - fname := f.Name() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = os.Remove(fname) }) - - return fname -} - -func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - t.Helper() - fname = createTempFileName(t, "addrbook_test") - - book = NewAddrBook(fname, true).(*addrBook) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - randAddrs := randNetAddressPairs(t, nOld) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - book.MarkGood(addr.addr.ID) - } - - randAddrs = randNetAddressPairs(t, nNew) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - } - - return -} - -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { - for _, addr := range addrs { - if book.IsGood(addr) { - nOld++ - } else { - nNew++ - 
} - } - return - } - -// Analyze the layout of the selection specified by 'addrs' -// Returns: -// - seqLens - the lengths of the sequences of addresses of same type -// - seqTypes - the types of sequences in selection -func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) { - // address types are: 0 - nil, 1 - new, 2 - old - var ( - prevType = 0 - currentSeqLen = 0 - ) - - for _, addr := range addrs { - addrType := 0 - if book.IsGood(addr) { - addrType = 2 - } else { - addrType = 1 - } - if addrType != prevType && prevType != 0 { - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - currentSeqLen = 0 - } - currentSeqLen++ - prevType = addrType - } - - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - - return -} diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go deleted file mode 100644 index 37019f60a..000000000 --- a/internal/p2p/pex/bench_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package pex - -import ( - "testing" - - "github.com/tendermint/tendermint/types" -) - -func BenchmarkAddrBook_hash(b *testing.B) { - book := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: "", - routabilityStrict: true, - } - book.init() - msg := []byte(`foobar`) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = book.hash(msg) - } -} diff --git a/internal/p2p/pex/doc.go b/internal/p2p/pex/doc.go index dc4f5d37a..70a5f6174 100644 --- a/internal/p2p/pex/doc.go +++ b/internal/p2p/pex/doc.go @@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests addresses and serves addresses to other peers. There are two versions of this service aligning with the two p2p frameworks that Tendermint currently supports. -V1 is coupled with the Switch (which handles peer connections and routing of -messages) and, alongside exchanging peer information in the form of port/IP -pairs, also has the responsibility of dialing peers and ensuring that a -node has a sufficient amount of peers connected. - -V2 is embedded with the new p2p stack and uses the peer manager to advertise +The reactor is embedded with the new p2p stack and uses the peer manager to advertise peers as well as add new peers to the peer store. The V2 reactor passes a different set of proto messages which include a list of [urls](https://golang.org/pkg/net/url/#URL). These can be used to save a set of endpoints that each peer uses. The V2 reactor has backwards compatibility with V1. It can also handle V1 messages. -The V2 reactor is able to tweak the intensity of it's search by decreasing or +The reactor is able to tweak the intensity of its search by decreasing or increasing the interval between each request. It tracks connected peers via a linked list, sending a request to the node at the front of the list and adding it to the back of the list once a response is received.
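The rotation just described amounts to a FIFO queue over connected peers: request from the peer at the front, re-append it to the back once it answers. A minimal sketch of that bookkeeping (editor's illustration with plain string IDs, not the reactor's actual types):

// Editor's illustration of the round-robin polling described above.
package main

import (
	"container/list"
	"fmt"
)

type pexQueue struct{ peers *list.List }

func newPexQueue() *pexQueue { return &pexQueue{peers: list.New()} }

// nextToPoll pops the front peer; the caller sends it a PEX request.
func (q *pexQueue) nextToPoll() (string, bool) {
	front := q.peers.Front()
	if front == nil {
		return "", false
	}
	q.peers.Remove(front)
	return front.Value.(string), true
}

// onResponse queues (or re-queues) a peer at the back, so every
// connected peer is polled in turn.
func (q *pexQueue) onResponse(peerID string) {
	q.peers.PushBack(peerID)
}

func main() {
	q := newPexQueue()
	q.onResponse("alice")
	q.onResponse("bob")

	id, _ := q.nextToPoll()
	fmt.Println(id)  // alice
	q.onResponse(id) // alice answered; she goes to the back of the line
}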
Using this method, a diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go deleted file mode 100644 index 275e71bf9..000000000 --- a/internal/p2p/pex/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - - "github.com/tendermint/tendermint/internal/p2p" -) - -type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookNonRoutable) Error() string { - return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) -} - -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress - BucketID int -} - -func (err errAddrBookOldAddressNewBucket) Error() string { - return fmt.Sprintf("failed consistency check!"+ - " Cannot add pre-existing address %v into new bucket %v", - err.Addr, err.BucketID) -} - -type ErrAddrBookSelf struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookSelf) Error() string { - return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) -} - -type ErrAddrBookPrivate struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookPrivate) Error() string { - return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) -} - -func (err ErrAddrBookPrivate) PrivateAddr() bool { - return true -} - -type ErrAddrBookPrivateSrc struct { - Src *p2p.NetAddress -} - -func (err ErrAddrBookPrivateSrc) Error() string { - return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) -} - -func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { - return true -} - -type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress -} - -func (err ErrAddrBookNilAddr) Error() string { - return fmt.Sprintf("Cannot add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) -} - -type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress - AddrErr error -} - -func (err ErrAddrBookInvalidAddr) Error() string { - return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) -} - -// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used -type ErrAddressBanned struct { - Addr *p2p.NetAddress -} - -func (err ErrAddressBanned) Error() string { - return fmt.Sprintf("Address: %v is currently banned", err.Addr) -} - -// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. -var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") diff --git a/internal/p2p/pex/file.go b/internal/p2p/pex/file.go deleted file mode 100644 index ce65f7d4d..000000000 --- a/internal/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/tendermint/tendermint/internal/libs/tempfile" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Saving AddrBook to file", "size", a.size()) - - addrs := make([]*knownAddress, 0, len(a.addrLookup)) - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. 
-// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go deleted file mode 100644 index 2a2ebe038..000000000 --- a/internal/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. -type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() types.NodeID { - return ka.Addr.ID -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) ban(banTime time.Duration) { - if ka.LastBanTime.Before(time.Now().Add(banTime)) { - ka.LastBanTime = time.Now().Add(banTime) - } -} - -func (ka *knownAddress) isBanned() bool { - return ka.LastBanTime.After(time.Now()) -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? 
- // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/internal/p2p/pex/params.go b/internal/p2p/pex/params.go deleted file mode 100644 index 29b4d45ab..000000000 --- a/internal/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. - newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which a source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 7 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 // ? - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. 
- minGetSelection = 32 - - // max addresses returned by GetSelection - // NOTE: this must match "maxMsgSize" - maxGetSelection = 250 -) diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go deleted file mode 100644 index 049dbd9f1..000000000 --- a/internal/p2p/pex/pex_reactor.go +++ /dev/null @@ -1,863 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -type Peer = p2p.Peer - -const ( - // PexChannel is a channel for PEX messages - PexChannel = byte(0x00) - - // over-estimate of max NetAddress size - // hexID (40) + IP (16) + Port (2) + Name (100) ... - // NOTE: dont use massive DNS name .. - maxAddressSize = 256 - - // NOTE: amplificaiton factor! - // small request results in up to maxMsgSize response - maxMsgSize = maxAddressSize * maxGetSelection - - // ensure we have enough peers - defaultEnsurePeersPeriod = 30 * time.Second - - // Seed/Crawler constants - - // minTimeBetweenCrawls is a minimum time between attempts to crawl a peer. - minTimeBetweenCrawls = 2 * time.Minute - - // check some peers every this - crawlPeerPeriod = 30 * time.Second - - maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h) - - // if node connects to seed, it does not have any trusted peers. - // Especially in the beginning, node should have more trusted peers than - // untrusted. - biasToSelectNewPeers = 30 // 70 to select good peers - - // if a peer is marked bad, it will be banned for at least this time period - defaultBanTime = 24 * time.Hour -) - -type errMaxAttemptsToDial struct { -} - -func (e errMaxAttemptsToDial) Error() string { - return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) -} - -type errTooEarlyToDial struct { - backoffDuration time.Duration - lastDialed time.Time -} - -func (e errTooEarlyToDial) Error() string { - return fmt.Sprintf( - "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", - e.backoffDuration, e.lastDialed, time.Since(e.lastDialed)) -} - -// Reactor handles PEX (peer exchange) and ensures that an -// adequate number of peers are connected to the switch. -// -// It uses `AddrBook` (address book) to store `NetAddress`es of the peers. -// -// ## Preventing abuse -// -// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg too. -// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod. 
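That throttling rule can be sketched on its own (editor's illustration: a plain map and a single-threshold check, whereas the reactor's receiveRequest further down uses a concurrent map and gives a peer's first two requests a free pass):

// Editor's sketch of the per-peer PEX request throttle described above.
// minInterval plays the role of minReceiveRequestInterval
// (ensurePeersPeriod / 3).
package main

import (
	"fmt"
	"time"
)

type requestLimiter struct {
	minInterval  time.Duration
	lastReceived map[string]time.Time // peer ID -> time of last request
}

func newRequestLimiter(ensurePeersPeriod time.Duration) *requestLimiter {
	return &requestLimiter{
		minInterval:  ensurePeersPeriod / 3,
		lastReceived: make(map[string]time.Time),
	}
}

// allow records the request and rejects peers that ask again before
// minInterval has elapsed; callers would disconnect and ban such peers.
func (l *requestLimiter) allow(peerID string, now time.Time) error {
	if last, ok := l.lastReceived[peerID]; ok && now.Sub(last) < l.minInterval {
		return fmt.Errorf("peer %v sent next PEX request too soon (last: %v)", peerID, last)
	}
	l.lastReceived[peerID] = now
	return nil
}

func main() {
	l := newRequestLimiter(30 * time.Second) // minInterval = 10s
	now := time.Now()
	fmt.Println(l.allow("peer1", now))                     // <nil>
	fmt.Println(l.allow("peer1", now.Add(2*time.Second)))  // error: too soon
	fmt.Println(l.allow("peer1", now.Add(11*time.Second))) // <nil>
}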
-type Reactor struct { - p2p.BaseReactor - - book AddrBook - config *ReactorConfig - ensurePeersPeriod time.Duration // TODO: should go in the config - - // maps to prevent abuse - requestsSent *cmap.CMap // ID->struct{}: unanswered send requests - lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - - seedAddrs []*p2p.NetAddress - - attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} - - // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo -} - -func (r *Reactor) minReceiveRequestInterval() time.Duration { - // NOTE: must be less than ensurePeersPeriod, otherwise we'll request - // peers too quickly from others and they'll think we're bad! - return r.ensurePeersPeriod / 3 -} - -// ReactorConfig holds reactor specific configuration data. -type ReactorConfig struct { - // Seed/Crawler mode - SeedMode bool - - // We want seeds to only advertise good peers. Therefore they should wait at - // least as long as we expect it to take for a peer to become good before - // disconnecting. - SeedDisconnectWaitPeriod time.Duration - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration - - // Seeds is a list of addresses reactor may use - // if it can't connect to peers in the addrbook. - Seeds []string -} - -type _attemptsToDial struct { - number int - lastDialed time.Time -} - -// NewReactor creates new PEX reactor. -func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { - r := &Reactor{ - book: b, - config: config, - ensurePeersPeriod: defaultEnsurePeersPeriod, - requestsSent: cmap.NewCMap(), - lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), - } - r.BaseReactor = *p2p.NewBaseReactor("PEX", r) - return r -} - -// OnStart implements BaseService -func (r *Reactor) OnStart() error { - err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { - return err - } - - numOnline, seedAddrs, err := r.checkSeeds() - if err != nil { - return err - } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") - } - - r.seedAddrs = seedAddrs - - // Check if this node should run - // in seed/crawler mode - if r.config.SeedMode { - go r.crawlPeersRoutine() - } else { - go r.ensurePeersRoutine() - } - return nil -} - -// OnStop implements BaseService -func (r *Reactor) OnStop() { - if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) - } -} - -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - RecvMessageCapacity: maxMsgSize, - - MaxSendBytes: 200, - }, - } -} - -// AddPeer implements Reactor by adding peer to the address book (if inbound) -// or by requesting more addresses (if outbound). -func (r *Reactor) AddPeer(p Peer) { - if p.IsOutbound() { - // For outbound peers, the address is already in the books - - // either via DialPeersAsync or r.Receive. - // Ask it for more peers if we need. - if r.book.NeedMoreAddrs() { - r.RequestAddrs(p) - } - } else { - // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() - if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) - return - } - - // Make it explicit that addr and src are the same for an inbound peer. 
- src := addr - - // add to book. dont RequestAddrs right away because - // we don't trust inbound as much - let ensurePeersRoutine handle it. - err = r.book.AddAddress(addr, src) - r.logErrAddrBook(err) - } -} - -// RemovePeer implements Reactor by resetting peer's requests info. -func (r *Reactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - -func (r *Reactor) logErrAddrBook(err error) { - if err != nil { - switch err.(type) { - case ErrAddrBookNilAddr: - r.Logger.Error("Failed to add new address", "err", err) - default: - // non-routable, self, full book, private, etc. - r.Logger.Debug("Failed to add new address", "err", err) - } - } -} - -// Receive implements Reactor by handling incoming PEX messages. -// XXX: do not call any methods that can block or incur heavy processing. -// https://github.com/tendermint/tendermint/issues/2888 -func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *tmp2p.PexRequest: - - // NOTE: this is a prime candidate for amplification attacks, - // so it's important we - // 1) restrict how frequently peers can request - // 2) limit the output size - - // If we're a seed and this is an inbound peer, - // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v != nil { - // FlushStop/StopPeer are already - // running in a go-routine. - return - } - r.lastReceivedRequests.Set(id, time.Now()) - - // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - go func() { - // In a go-routine so it doesn't block .Receive. - src.FlushStop() - r.Switch.StopPeerGracefully(src) - }() - - } else { - // Check we're not receiving requests too frequently. - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - r.SendAddrs(src, r.book.GetSelection()) - } - - case *tmp2p.PexResponse: - // If we asked for addresses, add them to the book - addrs, err := NetAddressesFromProto(msg.Addresses) - if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - err = r.ReceiveAddrs(addrs, src) - if err != nil { - r.Switch.StopPeerForError(src, err) - if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - } - return - } - - default: - r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg)) - } -} - -// enforces a minimum amount of time between requests -func (r *Reactor) receiveRequest(src Peer) error { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v == nil { - // initialize with empty time - lastReceived := time.Time{} - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - lastReceived := v.(time.Time) - if lastReceived.Equal(time.Time{}) { - // first time gets a free pass. 
then we start tracking the time - lastReceived = time.Now() - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - now := time.Now() - minInterval := r.minReceiveRequestInterval() - if now.Sub(lastReceived) < minInterval { - return fmt.Errorf( - "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) - } - r.lastReceivedRequests.Set(id, now) - return nil -} - -// RequestAddrs asks peer for more addresses if we do not already have a -// request out for this peer. -func (r *Reactor) RequestAddrs(p Peer) { - id := string(p.ID()) - if r.requestsSent.Has(id) { - return - } - r.Logger.Debug("Request addrs", "from", p) - r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) -} - -// ReceiveAddrs adds the given addrs to the addrbook if theres an open -// request for this peer and deletes the open request. -// If there's no open request for the src peer, it returns an error. -func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { - id := string(src.ID()) - if !r.requestsSent.Has(id) { - return ErrUnsolicitedList - } - r.requestsSent.Delete(id) - - srcAddr, err := src.NodeInfo().NetAddress() - if err != nil { - return err - } - - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - - for _, netAddr := range addrs { - // NOTE: we check netAddr validity and routability in book#AddAddress. - err = r.book.AddAddress(netAddr, srcAddr) - if err != nil { - r.logErrAddrBook(err) - // XXX: should we be strict about incoming data and disconnect from a - // peer here too? - continue - } - - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr) - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(netAddr) - } - } - - return nil -} - -// SendAddrs sends addrs to the peer. -func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)})) -} - -// SetEnsurePeersPeriod sets period to ensure peers connected. -func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { - r.ensurePeersPeriod = d -} - -// Ensures that sufficient peers are connected. (continuous) -func (r *Reactor) ensurePeersRoutine() { - var ( - seed = tmrand.NewRand() - jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) - ) - - // Randomize first round of communication to avoid thundering herd. - // If no peers are present directly start connecting so we guarantee swift - // setup with the help of configured seeds. - if r.nodeHasSomePeersOrDialingAny() { - time.Sleep(time.Duration(jitter)) - } - - // fire once immediately. - // ensures we dial the seeds right away if the book is empty - r.ensurePeers() - - // fire periodically - ticker := time.NewTicker(r.ensurePeersPeriod) - for { - select { - case <-ticker.C: - r.ensurePeers() - case <-r.Quit(): - ticker.Stop() - return - } - } -} - -// ensurePeers ensures that sufficient peers are connected. 
(once) -// -// heuristic that we haven't perfected yet, or, perhaps is manually edited by -// the node operator. It should not be used to compute what addresses are -// already connected or not. -func (r *Reactor) ensurePeers() { - var ( - out, in, dial = r.Switch.NumPeers() - numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) - ) - r.Logger.Info( - "Ensure peers", - "numOutPeers", out, - "numInPeers", in, - "numDialing", dial, - "numToDial", numToDial, - ) - - if numToDial <= 0 { - return - } - - // bias to prefer more vetted peers when we have fewer connections. - // not perfect, but somewhate ensures that we prioritize connecting to more-vetted - // NOTE: range here is [10, 90]. Too high ? - newBias := tmmath.MinInt(out, 8)*10 + 10 - - toDial := make(map[types.NodeID]*p2p.NetAddress) - // Try maxAttempts times to pick numToDial addresses to dial - maxAttempts := numToDial * 3 - - for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { - try := r.book.PickAddress(newBias) - if try == nil { - continue - } - if _, selected := toDial[try.ID]; selected { - continue - } - if r.Switch.IsDialingOrExistingAddress(try) { - continue - } - // TODO: consider moving some checks from toDial into here - // so we don't even consider dialing peers that we want to wait - // before dialing again, or have dialed too many times already - r.Logger.Info("Will dial address", "addr", try) - toDial[try.ID] = try - } - - // Dial picked addresses - for _, addr := range toDial { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(addr) - } - - if r.book.NeedMoreAddrs() { - // Check if banned nodes can be reinstated - r.book.ReinstateBadPeers() - } - - if r.book.NeedMoreAddrs() { - - // 1) Pick a random peer and ask for more. - peers := r.Switch.Peers().List() - peersCount := len(peers) - if peersCount > 0 { - rand := tmrand.NewRand() - peer := peers[rand.Int()%peersCount] - r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) - r.RequestAddrs(peer) - } - - // 2) Dial seeds if we are not dialing anyone. - // This is done in addition to asking a peer for addresses to work-around - // peers not participating in PEX. - if len(toDial) == 0 { - r.Logger.Info("No addresses to dial. 
Falling back to seeds") - r.dialSeeds() - } - } -} - -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { - attempts, lastDialed := r.dialAttemptsInfo(addr) - if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - rand := tmrand.NewRand() - jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) - backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} - } - } - - err := r.Switch.DialPeerWithAddress(addr) - if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return err - } - - markAddrInBookBasedOnErr(addr, r.book, err) - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr - r.attemptsToDial.Delete(addr.DialString()) - default: - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) - } - - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - return nil -} - -// maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { - if r.config.PersistentPeersMaxDialPeriod > 0 && - planned > r.config.PersistentPeersMaxDialPeriod && - r.Switch.IsPeerPersistent(addr) { - return r.config.PersistentPeersMaxDialPeriod - } - return planned -} - -// checkSeeds checks that addresses are well formed. -// Returns number of seeds we can connect to, along with all seeds addrs. -// return err if user provided any badly formatted seed addresses. -// Doesn't error if the seed node can't be reached. -// numOnline returns -1 if no seed nodes were in the initial configuration. 
-func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return -1, nil, nil - } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) - numOnline = lSeeds - len(errs) - for _, err := range errs { - switch e := err.(type) { - case types.ErrNetAddressLookup: - r.Logger.Error("Connecting to seed failed", "err", e) - default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) - } - } - return numOnline, netAddrs, nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *Reactor) dialSeeds() { - rand := tmrand.NewRand() - perm := rand.Perm(len(r.seedAddrs)) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr) - - switch err.(type) { - case nil, p2p.ErrCurrentlyDialingOrExistingAddress: - return - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - // do not write error message if there were no seeds specified in config - if len(r.seedAddrs) > 0 { - r.Switch.Logger.Error("Couldn't connect to any seeds") - } -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. -func (r *Reactor) crawlPeersRoutine() { - // If we have any seed nodes, consult them first - if len(r.seedAddrs) > 0 { - r.dialSeeds() - } else { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) - } - - // Fire periodically - ticker := time.NewTicker(crawlPeerPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - r.crawlPeers(r.book.GetSelection()) - r.cleanupCrawlPeerInfos() - case <-r.Quit(): - return - } - } -} - -// nodeHasSomePeersOrDialingAny returns true if the node is connected to some -// peers or dialing them currently. -func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { - out, in, dial := r.Switch.NumPeers() - return out+in+dial > 0 -} - -// crawlPeerInfo handles temporary data needed for the network crawling -// performed during seed/crawler mode. -type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` - // The last time we crawled the peer or attempted to do so. - LastCrawled time.Time `json:"last_crawled"` -} - -// crawlPeers will crawl the network looking for new peer addresses. -func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { - now := time.Now() - - for _, addr := range addrs { - peerInfo, ok := r.crawlPeerInfos[addr.ID] - - // Do not attempt to connect with peers we recently crawled. - if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { - continue - } - - // Record crawling attempt. 
- r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ - Addr: addr, - LastCrawled: now, - } - - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - continue - } - - peer := r.Switch.Peers().Get(addr.ID) - if peer != nil { - r.RequestAddrs(peer) - } - } -} - -func (r *Reactor) cleanupCrawlPeerInfos() { - for id, info := range r.crawlPeerInfos { - // If we did not crawl a peer for 24 hours, it means the peer was removed - // from the addrbook => remove - // - // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in - // the ideal case, - // 40 * crawlPeerPeriod ~ 20 minutes - if time.Since(info.LastCrawled) > 24*time.Hour { - delete(r.crawlPeerInfos, id) - } - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { - // TODO: detect more "bad peer" scenarios - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr, defaultBanTime) - default: - book.MarkAttempt(addr) - } -} - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.PexMessage{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} - case *tmp2p.PexResponse: - msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.PexMessage{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.PexMessage_PexRequest: - return msg.PexRequest, nil - case *tmp2p.PexMessage_PexResponse: - return msg.PexResponse, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} - -//----------------------------------------------------------------------------- -// address converters - -// NetAddressFromProto converts a Protobuf PexAddress into a native struct. -func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { - ip := net.ParseIP(pb.IP) - if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) - } - if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) - } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), - IP: ip, - Port: uint16(pb.Port), - }, nil -} - -// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) - for _, pb := range pbs { - na, err := NetAddressFromProto(pb) - if err != nil { - return nil, err - } - nas = append(nas, na) - } - return nas, nil -} - -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. 
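As a usage note, a round trip through the PexMessage oneof with the helpers above looks like the following (editor's sketch, written as if it lived in this same package so that it can borrow the file's mustEncode, decodeMsg, fmt, and tmp2p):

// Editor's sketch: wrap a request in the PexMessage oneof via
// mustEncode, then recover the concrete message with decodeMsg.
func pexRoundTrip() error {
	bz := mustEncode(&tmp2p.PexRequest{})

	msg, err := decodeMsg(bz)
	if err != nil {
		return err
	}
	if _, ok := msg.(*tmp2p.PexRequest); !ok {
		return fmt.Errorf("expected *tmp2p.PexRequest, got %T", msg)
	}
	return nil
}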
-func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { - pbs := make([]tmp2p.PexAddress, 0, len(nas)) - for _, na := range nas { - if na != nil { - pbs = append(pbs, tmp2p.PexAddress{ - ID: string(na.ID), - IP: na.IP.String(), - Port: uint32(na.Port), - }) - } - } - return pbs -} diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go deleted file mode 100644 index 56f24457f..000000000 --- a/internal/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,680 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/mock" - "github.com/tendermint/tendermint/libs/log" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, _ := createReactor(t, &ReactorConfig{}) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewReactor(books[i], &ReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() - err := books[switchIndex].AddAddress(addr, addr) - require.NoError(t, err) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - peerAddr := peer.SocketAddr() - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - err := book.AddAddress(peerAddr, peerAddr) - require.NoError(t, err) - require.True(t, book.HasAddress(peerAddr)) - - id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peerAddr)) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peer.SocketAddr())) -} - -func TestCheckSeeds(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. test creating peer with no seeds works - peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - - // 3. test create peer with online seed works - peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 4. test create peer with all seeds having unresolvable DNS fails - badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 5. 
test create peer with one good seed address succeeds - badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 2. create usual peer with only seed configured. - peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) - t.Cleanup(func() { _ = peer.Stop() }) - - // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - t.Cleanup(func() { _ = secondPeer.Stop() }) - - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) - - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) -} - -func TestPEXReactorSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreateDefaultPeer(dir, 1) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // 2. attemptDisconnects should not disconnect because of wait period - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 3. 
attemptDisconnects should disconnect after wait period - pexR.attemptDisconnects() - assert.Equal(t, 0, sw.Peers().Size()) -} - -func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 2. attemptDisconnects should not disconnect because the peer is persistent - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { - // directory to store address books - pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - // No need to start sw since crawlPeers is called manually here. - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - require.NoError(t, book.AddAddress(addr, addr)) - - assert.True(t, book.HasAddress(addr)) - - // imitate maxAttemptsToDial reached - pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) - - assert.False(t, book.HasAddress(addr)) -} - -// connect a peer to a seed, wait a bit, then stop it. -// this should give it time to request addrs and for the seed -// to call FlushStop, and allows us to test calling Stop concurrently -// with FlushStop. Before a fix, this non-deterministically reproduced -// https://github.com/tendermint/tendermint/issues/3231. 
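
An aside before the FlushStop test below: the "message abuse" tests deleted above (TestPEXReactorRequestMessageAbuse and TestPEXReactorAddrsMessageAbuse) pin down the legacy reactor's per-peer rate limit, under which a request arriving too soon after the previous one, or an unsolicited address response, gets the peer disconnected and banned. A minimal sketch of that policy with illustrative names, not the reactor's actual implementation (the reactor keeps this state in lastReceivedRequests and requestsSent):

    package pexsketch

    import "time"

    // requestLimiter is an illustrative stand-in for the reactor's
    // per-peer request bookkeeping.
    type requestLimiter struct {
        minInterval time.Duration        // cf. minReceiveRequestInterval
        lastRequest map[string]time.Time // peer ID -> last request time
    }

    func newRequestLimiter(min time.Duration) *requestLimiter {
        return &requestLimiter{
            minInterval: min,
            lastRequest: make(map[string]time.Time),
        }
    }

    // allow reports whether a fresh request from peerID is acceptable;
    // when it returns false the caller disconnects and bans the peer.
    func (l *requestLimiter) allow(peerID string, now time.Time) bool {
        if last, ok := l.lastRequest[peerID]; ok && now.Sub(last) < l.minInterval {
            return false
        }
        l.lastRequest[peerID] = now
        return true
    }
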
-func TestPEXReactorSeedModeFlushStop(t *testing.T) { - t.Skip("flaky test, will be replaced by new P2P stack") - N := 2 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - config := &ReactorConfig{} - if i == 0 { - // first one is a seed node - config = &ReactorConfig{SeedMode: true} - } - r := NewReactor(books[i], config) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - reactor := switches[0].Reactors()["pex"].(*Reactor) - peerID := switches[1].NodeInfo().ID() - - assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) - - // sleep up to a second while waiting for the peer to send us a message. - // this isn't perfect since it's possible the peer sends us a msg and we FlushStop - // before this loop catches it. but non-deterministically it works pretty well. - for i := 0; i < 1000; i++ { - v := reactor.lastReceivedRequests.Get(string(peerID)) - if v != nil { - break - } - time.Sleep(time.Millisecond) - } - - // by now the FlushStop should have happened. Try stopping the peer. - // it should be safe to do this. - peers := switches[0].Peers().List() - for _, peer := range peers { - err := peer.Stop() - require.NoError(t, err) - } - - // stop the switches - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(t, &ReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - err := pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = timeout 
- ) - - for { - select { - case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - break - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least %d peer(s) (switches: %s)", - nPeers, numPeersStr, - ) - return - } - } -} - -// Creates a peer with the provided config -func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return peer -} - -// Creates a peer with the default config -func testCreateDefaultPeer(dir string, id int) *p2p.Switch { - return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) -} - -// Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { - seed := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) - for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests - book.MarkGood(knownAddrs[j].ID) - } - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return seed -} - -// Creates a peer which knows about the provided seed. 
-// Starting and stopping the peer is left to the caller -func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { - conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, - } - return testCreatePeerWithConfig(dir, id, conf) -} - -func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { - // directory to store address book - book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - for _, r := range reactors { - sw.AddReactor(r.String(), r) - } - return sw - }, log.TestingLogger()) - return sw -} - -func TestPexVectors(t *testing.T) { - addr := tmp2p.PexAddress{ - ID: "1", - IP: "127.0.0.1", - Port: 9090, - } - - testCases := []struct { - testName string - msg proto.Message - expBytes string - }{ - {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, - } - - for _, tc := range testCases { - tc := tc - - bz := mustEncode(tc.msg) - - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } -} - -// FIXME: This function is used in place of testing.TB.TempDir() -// as the latter seems to cause test cases to fail when it is -// unable to remove the temporary directory once the test case -// execution terminates. This seems to happen often with pex -// reactor test cases. -// -// References: -// https://github.com/tendermint/tendermint/pull/5733 -// https://github.com/tendermint/tendermint/issues/5732 -func tempDir(t *testing.T) string { - t.Helper() - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - return dir -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b..f6fcad5e1 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -17,22 +17,33 @@ import ( ) var ( - _ service.Service = (*ReactorV2)(nil) + _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) -// TODO: Consolidate with params file. -// See https://github.com/tendermint/tendermint/issues/6371 const ( + // PexChannel is a channel for PEX messages + PexChannel = 0x00 + + // over-estimate of max NetAddress size + // hexID (40) + IP (16) + Port (2) + Name (100) ... + // NOTE: dont use massive DNS name .. + maxAddressSize = 256 + + // max addresses returned by GetSelection + // NOTE: this must match "maxMsgSize" + maxGetSelection = 250 + + // NOTE: amplification factor! 
+ // small request results in up to maxMsgSize response + maxMsgSize = maxAddressSize * maxGetSelection + // the minimum time one peer can send another request to the same peer minReceiveRequestInterval = 100 * time.Millisecond // the maximum amount of addresses that can be included in a response maxAddresses uint16 = 100 - // allocated time to resolve a node address into a set of endpoints - resolveTimeout = 3 * time.Second - // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -46,22 +57,17 @@ const ( // within each reactor (as they are now) or, considering that the reactor doesn't // really need to care about the channel descriptors, if they should be housed // in the node module. -func ChannelDescriptor() conn.ChannelDescriptor { - return conn.ChannelDescriptor{ +func ChannelDescriptor() *conn.ChannelDescriptor { + return &conn.ChannelDescriptor{ ID: PexChannel, + MessageType: new(protop2p.PexMessage), Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 200, + RecvBufferCapacity: 128, } } -// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor -// is Reactor. -// -// FIXME: Rename this when Reactor is removed, and consider moving to p2p/. -// // The peer exchange or PEX reactor supports the peer manager by sending // requests to other peers for addresses that can be given to the peer manager // and at the same time advertises addresses to peers that need more. @@ -70,7 +76,7 @@ func ChannelDescriptor() conn.ChannelDescriptor { // increasing the interval between each request. It tracks connected peers via // a linked list, sending a request to the node at the front of the list and // adding it to the back of the list once a response is received. -type ReactorV2 struct { +type Reactor struct { service.BaseService peerManager *p2p.PeerManager @@ -109,14 +115,14 @@ type ReactorV2 struct { } // NewReactor returns a reference to a new reactor. -func NewReactorV2( +func NewReactor( logger log.Logger, peerManager *p2p.PeerManager, pexCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, -) *ReactorV2 { +) *Reactor { - r := &ReactorV2{ + r := &Reactor{ peerManager: peerManager, pexCh: pexCh, peerUpdates: peerUpdates, @@ -134,7 +140,7 @@ func NewReactorV2( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *ReactorV2) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { go r.processPexCh() go r.processPeerUpdates() return nil @@ -142,7 +148,7 @@ func (r *ReactorV2) OnStart() error { // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *ReactorV2) OnStop() { +func (r *Reactor) OnStop() { // Close closeCh to signal to all spawned goroutines to gracefully exit. All // p2p Channels should execute Close(). close(r.closeCh) @@ -156,7 +162,7 @@ func (r *ReactorV2) OnStop() { // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *ReactorV2) processPexCh() { +func (r *Reactor) processPexCh() { defer r.pexCh.Close() for { @@ -186,7 +192,7 @@ func (r *ReactorV2) processPexCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. 
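
(A worked check of the constants added at the top of this reactor.go hunk, which make the amplification note concrete: a PEX request of a few bytes may legally draw a response of up to maxAddressSize * maxGetSelection bytes. Values copied from the diff:)

    const (
        maxAddressSize  = 256                              // over-estimate per serialized address
        maxGetSelection = 250                              // max addresses per response
        maxMsgSize      = maxAddressSize * maxGetSelection // = 64,000 bytes
    )
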
When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *ReactorV2) processPeerUpdates() { +func (r *Reactor) processPeerUpdates() { defer r.peerUpdates.Close() for { @@ -202,20 +208,26 @@ func (r *ReactorV2) processPeerUpdates() { } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { +func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *protop2p.PexRequest: - // Check if the peer hasn't sent a prior request too close to this one - // in time. + // check if the peer hasn't sent a prior request too close to this one + // in time if err := r.markPeerRequest(envelope.From); err != nil { return err } - // parse and send the legacy PEX addresses - pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses)) + // request peers from the peer manager and parse the NodeAddresses into + // URL strings + nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) + pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) + for idx, addr := range nodeAddresses { + pexAddresses[idx] = protop2p.PexAddress{ + URL: addr.String(), + } + } r.pexCh.Out <- p2p.Envelope{ To: envelope.From, Message: &protop2p.PexResponse{Addresses: pexAddresses}, @@ -236,9 +248,7 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { } for _, pexAddress := range msg.Addresses { - // no protocol is prefixed so we assume the default (mconn) - peerAddress, err := p2p.ParseNodeAddress( - fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) + peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) if err != nil { continue } @@ -253,58 +263,6 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { r.totalPeers++ } - // V2 PEX MESSAGES - case *protop2p.PexRequestV2: - // check if the peer hasn't sent a prior request too close to this one - // in time - if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // request peers from the peer manager and parse the NodeAddresses into - // URL strings - nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) - pexAddressesV2 := make([]protop2p.PexAddressV2, len(nodeAddresses)) - for idx, addr := range nodeAddresses { - pexAddressesV2[idx] = protop2p.PexAddressV2{ - URL: addr.String(), - } - } - r.pexCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &protop2p.PexResponseV2{Addresses: pexAddressesV2}, - } - - case *protop2p.PexResponseV2: - // check if the response matches a request that was made to that peer - if err := r.markPeerResponse(envelope.From); err != nil { - return err - } - - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) - } - - for _, pexAddress := range msg.Addresses { - peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) - if err != nil { - continue - } - added, err := r.peerManager.Add(peerAddress) - if err != nil { - logger.Error("failed to add V2 PEX address", "address", peerAddress, "err", err) - } - if added { - r.newPeers++ - logger.Debug("added V2 PEX address", "address", peerAddress) - } - r.totalPeers++ - } - default: return fmt.Errorf("received unknown message: %T", msg) } @@ -312,59 +270,10 @@ func (r *ReactorV2) handlePexMessage(envelope 
p2p.Envelope) error { return nil } -// resolve resolves a set of peer addresses into PEX addresses. -// -// FIXME: This is necessary because the current PEX protocol only supports -// IP/port pairs, while the P2P stack uses NodeAddress URLs. The PEX protocol -// should really use URLs too, to exchange DNS names instead of IPs and allow -// different transport protocols (e.g. QUIC and MemoryTransport). -// -// FIXME: We may want to cache and parallelize this, but for now we'll just rely -// on the operating system to cache it for us. -func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { - limit := len(addresses) - pexAddresses := make([]protop2p.PexAddress, 0, limit) - - for _, address := range addresses { - ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) - endpoints, err := address.Resolve(ctx) - r.Logger.Debug("resolved node address", "endpoints", endpoints) - cancel() - - if err != nil { - r.Logger.Debug("failed to resolve address", "address", address, "err", err) - continue - } - - for _, endpoint := range endpoints { - r.Logger.Debug("checking endpint", "IP", endpoint.IP, "Port", endpoint.Port) - if len(pexAddresses) >= limit { - return pexAddresses - - } else if endpoint.IP != nil { - r.Logger.Debug("appending pex address") - // PEX currently only supports IP-networked transports (as - // opposed to e.g. p2p.MemoryTransport). - // - // FIXME: as the PEX address contains no information about the - // protocol, we jam this into the ID. We won't need to this once - // we support URLs - pexAddresses = append(pexAddresses, protop2p.PexAddress{ - ID: string(address.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - } - - return pexAddresses -} - // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -391,7 +300,7 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we // send a request for addresses. -func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { +func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -408,7 +317,7 @@ func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } } -func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time { +func (r *Reactor) waitUntilNextRequest() <-chan time.Time { return time.After(time.Until(r.nextRequestTime)) } @@ -416,7 +325,7 @@ func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time { // peer a request for more peer addresses. 
The function then moves the // peer into the requestsSent bucket and calculates when the next request // time should be -func (r *ReactorV2) sendRequestForPeers() { +func (r *Reactor) sendRequestForPeers() { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { @@ -433,17 +342,10 @@ func (r *ReactorV2) sendRequestForPeers() { break } - // The node accommodates for both pex systems - if r.isLegacyPeer(peerID) { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequest{}, - } - } else { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequestV2{}, - } + // send out the pex request + r.pexCh.Out <- p2p.Envelope{ + To: peerID, + Message: &protop2p.PexRequest{}, } // remove the peer from the abvailable peers list and mark it in the requestsSent map @@ -464,7 +366,7 @@ func (r *ReactorV2) sendRequestForPeers() { // new nodes will plummet to a very small number, meaning the interval expands // to its upper bound. // CONTRACT: Must use a write lock as nextRequestTime is updated -func (r *ReactorV2) calculateNextRequestTime() { +func (r *Reactor) calculateNextRequestTime() { // check if the peer store is full. If so then there is no need // to send peer requests too often if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { @@ -500,7 +402,7 @@ func (r *ReactorV2) calculateNextRequestTime() { r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) } -func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { +func (r *Reactor) markPeerRequest(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { @@ -513,7 +415,7 @@ func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { return nil } -func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { +func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() // check if a request to this peer was sent @@ -527,14 +429,3 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { r.availablePeers[peer] = struct{}{} return nil } - -// all addresses must use a MCONN protocol for the peer to be considered part of the -// legacy p2p pex system -func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool { - for _, addr := range r.peerManager.Addresses(peer) { - if addr.Protocol != p2p.MConnProtocol { - return false - } - } - return true -} diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index a5acb0d5e..63b182fc0 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/libs/log" - proto "github.com/tendermint/tendermint/proto/tendermint/p2p" + p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -27,36 +27,40 @@ const ( firstNode = 0 secondNode = 1 thirdNode = 2 - fourthNode = 3 ) func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // start a network with one mock reactor and one "real" reactor - testNet := setupNetwork(t, testOptions{ + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) // assert that the mock node receives a request from the real node testNet.listenForRequest(t, secondNode, firstNode, shortWait) // assert that when 
a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []proto.PexAddressV2(nil)) + testNet.sendRequest(t, firstNode, secondNode) + testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 4, }) // make every node be only connected with one other node (it actually ends up // being two because of two way connections but oh well) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -65,23 +69,26 @@ func TestReactorConnectFullNetwork(t *testing.T) { } func TestReactorSendsRequestsTooOften(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) badNode := newNodeID(t, "b") r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &proto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*proto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponse) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &proto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } peerErr := <-r.pexErrCh @@ -92,32 +99,38 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { } func TestReactorSendsResponseWithoutRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 3, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two responses to counter that. 
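
A pattern worth flagging once, since it repeats through every test in this file: lifecycle is now driven by a context instead of paired Start/Stop calls. Inside a test body, with a reactor already constructed, the idiom looks roughly like this (a sketch assuming the Start(ctx)/Wait() service API this diff introduces, plus the usual context and require imports):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel() // cancellation replaces the old explicit reactor.Stop()

    require.NoError(t, reactor.Start(ctx))
    t.Cleanup(func() {
        cancel()
        reactor.Wait() // block until the reactor's goroutines have exited
    })
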
- testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) } func TestReactorNeverSendsTooManyPeers(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) - testNet.addNodes(t, 110) + testNet.addNodes(ctx, t, 110) nodes := make([]int, 110) for i := 0; i < len(nodes); i++ { nodes[i] = i + 2 @@ -130,16 +143,19 @@ func TestReactorNeverSendsTooManyPeers(t *testing.T) { } func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} added, err := r.manager.Add(peer) require.NoError(t, err) require.True(t, added) - addresses := make([]proto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddress, 101) for i := 0; i < len(addresses); i++ { nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = proto.PexAddressV2{ + addresses[i] = p2pproto.PexAddress{ URL: nodeAddress.String(), } } @@ -152,12 +168,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*proto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequest); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &proto.PexResponseV2{ + Message: &p2pproto.PexResponse{ Addresses: addresses, }, } @@ -174,14 +190,17 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { } func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 8, MaxPeers: 4, MaxConnected: 3, BufferSize: 8, }) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // test that all nodes reach full capacity for _, nodeID := range testNet.nodes { @@ -193,14 +212,17 @@ func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { } func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 3, MaxPeers: 25, MaxConnected: 25, BufferSize: 5, }) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -209,12 +231,15 @@ func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { } func TestReactorWithNetworkGrowth(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 5, BufferSize: 5, }) testNet.connectAll(t) - testNet.start(t) + 
testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -222,10 +247,10 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } // now we inject 10 more nodes - testNet.addNodes(t, 10) + testNet.addNodes(ctx, t, 10) for i := 5; i < testNet.total; i++ { node := testNet.nodes[i] - require.NoError(t, testNet.reactors[node].Start()) + require.NoError(t, testNet.reactors[node].Start(ctx)) require.True(t, testNet.reactors[node].IsRunning()) // we connect all new nodes to a single entry point and check that the // node can distribute the addresses to all the others @@ -239,40 +264,8 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } } -func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 3, - }) - testNet.connectAll(t) - testNet.start(t) - t.Log(testNet.nodes) - - // mock node sends a V1 Pex message to the second node - testNet.sendRequest(t, firstNode, secondNode, false) - addrs := testNet.getAddressesFor(t, []int{thirdNode}) - testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs) -} - -func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 4, - BufferSize: 4, - }) - testNet.connectPeers(t, firstNode, secondNode) - testNet.connectPeers(t, firstNode, thirdNode) - testNet.connectPeers(t, firstNode, fourthNode) - testNet.start(t) - - testNet.listenForRequest(t, secondNode, firstNode, shortWait) - // send a v1 response instead - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false) - testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait) -} - type singleTestReactor struct { - reactor *pex.ReactorV2 + reactor *pex.Reactor pexInCh chan p2p.Envelope pexOutCh chan p2p.Envelope pexErrCh chan p2p.PeerError @@ -281,7 +274,7 @@ type singleTestReactor struct { manager *p2p.PeerManager } -func setupSingle(t *testing.T) *singleTestReactor { +func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { t.Helper() nodeID := newNodeID(t, "a") chBuf := 2 @@ -290,7 +283,7 @@ func setupSingle(t *testing.T) *singleTestReactor { pexErrCh := make(chan p2p.PeerError, chBuf) pexCh := p2p.NewChannel( p2p.ChannelID(pex.PexChannel), - new(proto.PexMessage), + new(p2pproto.PexMessage), pexInCh, pexOutCh, pexErrCh, @@ -301,15 +294,12 @@ func setupSingle(t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates) - require.NoError(t, reactor.Start()) + reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates) + require.NoError(t, reactor.Start(ctx)) t.Cleanup(func() { - err := reactor.Stop() - if err != nil { - t.Fatal(err) - } pexCh.Close() peerUpdates.Close() + reactor.Wait() }) return &singleTestReactor{ @@ -327,7 +317,7 @@ type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*pex.ReactorV2 + reactors map[types.NodeID]*pex.Reactor pexChannels map[types.NodeID]*p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate @@ -349,7 +339,7 @@ type testOptions struct { // setup setups a test suite with a network of nodes. 
Mocknodes represent the // hollow nodes that the test can listen and send on -func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { +func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite { t.Helper() require.Greater(t, opts.TotalNodes, opts.MockNodes) @@ -369,8 +359,8 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, networkOpts), - reactors: make(map[types.NodeID]*pex.ReactorV2, realNodes), + network: p2ptest.MakeNetwork(ctx, t, networkOpts), + reactors: make(map[types.NodeID]*pex.Reactor, realNodes), pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), @@ -380,9 +370,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { // NOTE: we don't assert that the channels get drained after stopping the // reactor - rts.pexChannels = rts.network.MakeChannelsNoCleanup( - t, pex.ChannelDescriptor(), new(proto.PexMessage), chBuf, - ) + rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor()) idx := 0 for nodeID := range rts.network.Nodes { @@ -394,7 +382,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { if idx < opts.MockNodes { rts.mocks = append(rts.mocks, nodeID) } else { - rts.reactors[nodeID] = pex.NewReactorV2( + rts.reactors[nodeID] = pex.NewReactor( rts.logger.With("nodeID", nodeID), rts.network.Nodes[nodeID].PeerManager, rts.pexChannels[nodeID], @@ -411,7 +399,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { t.Cleanup(func() { for nodeID, reactor := range rts.reactors { if reactor.IsRunning() { - require.NoError(t, reactor.Stop()) + reactor.Wait() require.False(t, reactor.IsRunning()) } rts.pexChannels[nodeID].Close() @@ -427,32 +415,30 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { } // starts up the pex reactors for each node -func (r *reactorTestSuite) start(t *testing.T) { +func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() for _, reactor := range r.reactors { - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) } } -func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { +func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) { t.Helper() for i := 0; i < nodes; i++ { - node := r.network.MakeNode(t, p2ptest.NodeOptions{ + node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{ MaxPeers: r.opts.MaxPeers, MaxConnected: r.opts.MaxConnected, }) r.network.Nodes[node.NodeID] = node nodeID := node.NodeID - r.pexChannels[nodeID] = node.MakeChannelNoCleanup( - t, pex.ChannelDescriptor(), new(proto.PexMessage), r.opts.BufferSize, - ) + r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor()) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) - r.reactors[nodeID] = pex.NewReactorV2( + r.reactors[nodeID] = pex.NewReactor( r.logger.With("nodeID", nodeID), r.network.Nodes[nodeID].PeerManager, r.pexChannels[nodeID], @@ -488,11 +474,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, 
r.logger.Info("Listening for request", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexRequestV2) + _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexRequestV2{}, msg.Message) + require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -507,11 +493,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*proto.PexResponseV2) + m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") return true @@ -523,10 +509,10 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) r.listenFor(t, to, conditional, assertion, waitPeriod) } @@ -534,36 +520,17 @@ func (r *reactorTestSuite) listenForResponse( t *testing.T, fromNode, toNode int, waitPeriod time.Duration, - addresses []proto.PexAddressV2, + addresses []p2pproto.PexAddress, ) { r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponse) r.logger.Info("message", msg, "ok", ok) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexResponseV2{Addresses: addresses}, msg.Message) - return true - } - r.listenFor(t, to, conditional, assertion, waitPeriod) -} - -func (r *reactorTestSuite) listenForLegacyResponse( - t *testing.T, - fromNode, toNode int, - waitPeriod time.Duration, - addresses []proto.PexAddress, -) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) - to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponse) - return ok && msg.From == from - } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexResponse{Addresses: addresses}, msg.Message) + require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -595,46 +562,22 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []proto.PexAddressV2 { - addresses := make([]proto.PexAddressV2, len(nodes)) +func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { + addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - addresses[idx] = proto.PexAddressV2{ + addresses[idx] = p2pproto.PexAddress{ URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) 
getAddressesFor(t *testing.T, nodes []int) []proto.PexAddress { - addresses := make([]proto.PexAddress, len(nodes)) - for idx, node := range nodes { - nodeID := r.nodes[node] - nodeAddrs := r.network.Nodes[nodeID].NodeAddress - endpoints, err := nodeAddrs.Resolve(context.Background()) - require.NoError(t, err) - require.Len(t, endpoints, 1) - addresses[idx] = proto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoints[0].IP.String(), - Port: uint32(endpoints[0].Port), - } - } - return addresses -} - -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) { +func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) { to, from := r.checkNodePair(t, toNode, fromNode) - if v2 { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &proto.PexRequestV2{}, - } - } else { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &proto.PexRequest{}, - } + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexRequest{}, } } @@ -642,25 +585,14 @@ func (r *reactorTestSuite) sendResponse( t *testing.T, fromNode, toNode int, withNodes []int, - v2 bool, ) { from, to := r.checkNodePair(t, fromNode, toNode) - if v2 { - addrs := r.getV2AddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &proto.PexResponseV2{ - Addresses: addrs, - }, - } - } else { - addrs := r.getAddressesFor(t, withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &proto.PexResponse{ - Addresses: addrs, - }, - } + addrs := r.getAddressesFor(withNodes) + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexResponse{ + Addresses: addrs, + }, } } @@ -763,32 +695,6 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int require.True(t, added) } -// nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto.PexAddress { - var addresses []proto.PexAddress - for _, i := range nodeIndices { - if i < len(r.nodes) { - require.Fail(t, "index for pex address is greater than number of nodes") - } - nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - endpoints, err := nodeAddrs.Resolve(ctx) - cancel() - require.NoError(t, err) - for _, endpoint := range endpoints { - if endpoint.IP != nil { - addresses = append(addresses, proto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - - } - return addresses -} - func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index e4560c7bd..11cdbd130 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -71,7 +71,7 @@ type pqScheduler struct { size uint sizes map[uint]uint // cumulative priority sizes pq *priorityQueue - chDescs []ChannelDescriptor + chDescs []*ChannelDescriptor capacity uint chPriorities map[ChannelID]uint @@ -84,12 +84,12 @@ type pqScheduler struct { func newPQScheduler( logger log.Logger, m *Metrics, - chDescs []ChannelDescriptor, + chDescs []*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { // copy each ChannelDescriptor and sort them by ascending channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) + chDescsCopy := make([]*ChannelDescriptor, len(chDescs)) copy(chDescsCopy, chDescs) 
sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority < chDescsCopy[j].Priority }) @@ -99,7 +99,7 @@ func newPQScheduler( ) for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) + chID := chDesc.ID chPriorities[chID] = uint(chDesc.Priority) sizes[uint(chDesc.Priority)] = 0 } @@ -167,13 +167,12 @@ func (s *pqScheduler) process() { timestamp: time.Now().UTC(), } - s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) - // enqueue // Check if we have sufficient capacity to simply enqueue the incoming // Envelope. if s.size+pqEnv.size <= s.capacity { + s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) // enqueue the incoming Envelope s.push(pqEnv) } else { @@ -213,6 +212,8 @@ func (s *pqScheduler) process() { "capacity", s.capacity, ) + s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size)) + // dequeue/drop from the priority queue heap.Remove(s.pq, pqEnvTmp.index) @@ -256,7 +257,10 @@ func (s *pqScheduler) process() { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, - "peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) + "peer_id", string(pqEnv.envelope.To), + "message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) + s.metrics.PeerPendingSendBytes.With( + "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { case s.dequeueCh <- pqEnv.envelope: case <-s.closer.Done(): diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index ddb7addbe..ffa7e39a8 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -4,13 +4,16 @@ import ( "testing" "time" + gogotypes "github.com/gogo/protobuf/types" "github.com/tendermint/tendermint/libs/log" ) +type testMessage = gogotypes.StringValue + func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, + chDescs := []*ChannelDescriptor{ + {ID: 0x01, Priority: 1}, } pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 1171566d1..29646e327 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,9 +21,6 @@ import ( const queueBufferDefault = 32 -// ChannelID is an arbitrary channel ID. -type ChannelID uint16 - // Envelope contains a message with sender/receiver routing info. type Envelope struct { From types.NodeID // sender (empty if outbound) @@ -131,8 +128,8 @@ type RouterOptions struct { // no timeout. HandshakeTimeout time.Duration - // QueueType must be "wdrr" (Weighed Deficit Round Robin), "priority", or - // "fifo". Defaults to "fifo". + // QueueType must be, "priority", or "fifo". Defaults to + // "fifo". QueueType string // MaxIncomingConnectionAttempts rate limits the number of incoming connection @@ -174,7 +171,6 @@ type RouterOptions struct { const ( queueTypeFifo = "fifo" queueTypePriority = "priority" - queueTypeWDRR = "wdrr" ) // Validate validates router options. 
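
With the WDRR scheduler deleted, the validation below is left with exactly two queue types. In the style of router_init_test.go, the accepted and rejected settings now look like this (sketch; the empty string still defaults to fifo):

    opts := RouterOptions{QueueType: queueTypePriority} // or queueTypeFifo
    require.NoError(t, opts.Validate())

    opts = RouterOptions{} // "" is rewritten to queueTypeFifo by Validate
    require.NoError(t, opts.Validate())

    bad := RouterOptions{QueueType: "wdrr"} // removed by this diff
    require.Error(t, bad.Validate()) // queue type "wdrr" is not supported
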
@@ -182,8 +178,8 @@ func (o *RouterOptions) Validate() error { switch o.QueueType { case "": o.QueueType = queueTypeFifo - case queueTypeFifo, queueTypeWDRR, queueTypePriority: - // passI me + case queueTypeFifo, queueTypePriority: + // pass default: return fmt.Errorf("queue type %q is not supported", o.QueueType) } @@ -251,8 +247,9 @@ type Router struct { nodeInfo types.NodeInfo privKey crypto.PrivKey peerManager *PeerManager - chDescs []ChannelDescriptor + chDescs []*ChannelDescriptor transports []Transport + endpoints []Endpoint connTracker connectionTracker protocolTransports map[Protocol]Transport stopCh chan struct{} // signals Router shutdown @@ -281,6 +278,7 @@ func NewRouter( privKey crypto.PrivKey, peerManager *PeerManager, transports []Transport, + endpoints []Endpoint, options RouterOptions, ) (*Router, error) { @@ -297,8 +295,9 @@ func NewRouter( options.MaxIncomingConnectionAttempts, options.IncomingConnectionWindow, ), - chDescs: make([]ChannelDescriptor, 0), + chDescs: make([]*ChannelDescriptor, 0), transports: transports, + endpoints: endpoints, protocolTransports: map[Protocol]Transport{}, peerManager: peerManager, options: options, @@ -345,17 +344,6 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { return q }, nil - case queueTypeWDRR: - return func(size int) queue { - if size%2 != 0 { - size++ - } - - q := newWDRRScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) - q.start() - return q - }, nil - default: return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType) } @@ -367,19 +355,21 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { // implement Wrapper to automatically (un)wrap multiple message types in a // wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) { +func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() - id := ChannelID(chDesc.ID) + id := chDesc.ID if _, ok := r.channelQueues[id]; ok { return nil, fmt.Errorf("channel %v already exists", id) } r.chDescs = append(r.chDescs, chDesc) - queue := r.queueFactory(size) - outCh := make(chan Envelope, size) - errCh := make(chan PeerError, size) + messageType := chDesc.MessageType + + queue := r.queueFactory(chDesc.RecvBufferCapacity) + outCh := make(chan Envelope, chDesc.RecvBufferCapacity) + errCh := make(chan PeerError, chDesc.RecvBufferCapacity) channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh) var wrapper Wrapper @@ -393,6 +383,10 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message // add the channel to the nodeInfo if it's not already there. 
r.nodeInfo.AddChannel(uint16(chDesc.ID)) + for _, t := range r.transports { + t.AddChannelDescriptors([]*ChannelDescriptor{chDesc}) + } + go func() { defer func() { r.channelMtx.Lock() @@ -543,8 +537,15 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { + const ( + maxDialerInterval = 3000 + minDialerInterval = 250 + ) + // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond) + dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) + + timer := time.NewTimer(dur * time.Millisecond) defer timer.Stop() select { @@ -620,7 +621,7 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { // The Router should do the handshake and have a final ack/fail // message to make sure both ends have accepted the connection, such // that it can be coordinated with the peer manager. - peerInfo, _, err := r.handshakePeer(ctx, conn, "") + peerInfo, err := r.handshakePeer(ctx, conn, "") switch { case errors.Is(err, context.Canceled): return @@ -714,7 +715,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { return } - peerInfo, _, err := r.handshakePeer(ctx, conn, address.NodeID) + peerInfo, err := r.handshakePeer(ctx, conn, address.NodeID) switch { case errors.Is(err, context.Canceled): conn.Close() @@ -809,7 +810,7 @@ func (r *Router) handshakePeer( ctx context.Context, conn Connection, expectID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { +) (types.NodeInfo, error) { if r.options.HandshakeTimeout > 0 { var cancel context.CancelFunc @@ -819,27 +820,27 @@ func (r *Router) handshakePeer( peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey) if err != nil { - return peerInfo, peerKey, err + return peerInfo, err } if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err) + return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", + return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) } if expectID != "" && expectID != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q", + return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q", expectID, peerInfo.NodeID) } if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ + return peerInfo, ErrRejected{ err: err, id: peerInfo.ID(), isIncompatible: true, } } - return peerInfo, peerKey, nil + return peerInfo, nil } func (r *Router) runWithPeerMutex(fn func() error) error { @@ -938,7 +939,8 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { case queue.enqueue() <- Envelope{From: peerID, Message: msg}: r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), - "peer_id", string(peerID)).Add(float64(proto.Size(msg))) + "peer_id", string(peerID), + "message_type", r.metrics.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) r.metrics.RouterChannelQueueSend.Observe(time.Since(start).Seconds()) r.logger.Debug("received message", "peer", peerID, "message", msg) @@ -970,8 +972,7 @@ func (r *Router) 
sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) continue } - _, err = conn.SendMessage(envelope.channelID, bz) - if err != nil { + if err = conn.SendMessage(envelope.channelID, bz); err != nil { return err } @@ -1022,14 +1023,21 @@ func (r *Router) NodeInfo() types.NodeInfo { } // OnStart implements service.Service. -func (r *Router) OnStart() error { - netAddr, _ := r.nodeInfo.NetAddress() +func (r *Router) OnStart(ctx context.Context) error { + for _, transport := range r.transports { + for _, endpoint := range r.endpoints { + if err := transport.Listen(endpoint); err != nil { + return err + } + } + } + r.Logger.Info( "starting router", "node_id", r.nodeInfo.NodeID, "channels", r.nodeInfo.Channels, "listen_addr", r.nodeInfo.ListenAddr, - "net_addr", netAddr, + "transports", len(r.transports), ) go r.dialPeers() diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index 3622c0cc1..c8bef696a 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -18,37 +18,29 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { t.Run("Default", func(t *testing.T) { require.Zero(t, os.Getenv("TM_P2P_QUEUE")) opts := RouterOptions{} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Fifo", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypeFifo} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Priority", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypePriority} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) q, ok := r.queueFactory(1).(*pqScheduler) require.True(t, ok) defer q.close() }) - t.Run("WDRR", func(t *testing.T) { - opts := RouterOptions{QueueType: queueTypeWDRR} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) - require.NoError(t, err) - q, ok := r.queueFactory(1).(*wdrrScheduler) - require.True(t, ok) - defer q.close() - }) t.Run("NonExistant", func(t *testing.T) { opts := RouterOptions{QueueType: "fast"} - _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.Error(t, err) require.Contains(t, err.Error(), "fast") }) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 436e3f004..c77e9e44d 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -44,13 +44,16 @@ func echoReactor(channel *p2p.Channel) { } func TestRouter_Network(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel where all peers run echoReactor. 
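`OnStart` now takes a context and binds every transport to the configured endpoints before the dial and accept routines spin up. A sketch of the new startup flow from a caller's perspective; the `NewRouter` argument order is inferred from the test diffs below (the added trailing `nil` is the new endpoints slice), so treat the exact signature as illustrative:

```go
package p2pexample

import (
	"context"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// startRouter constructs and starts a context-aware router. Canceling
// the returned CancelFunc begins shutdown; router.Wait blocks until
// shutdown completes (the tests register it via t.Cleanup).
func startRouter(
	logger log.Logger,
	nodeInfo types.NodeInfo,
	privKey crypto.PrivKey,
	peerManager *p2p.PeerManager,
	transports []p2p.Transport,
) (*p2p.Router, context.CancelFunc, error) {
	ctx, cancel := context.WithCancel(context.Background())

	router, err := p2p.NewRouter(
		logger,
		nil, // metrics (nil in the tests)
		nodeInfo,
		privKey,
		peerManager,
		transports,
		nil, // endpoints: OnStart calls Listen on each transport for each endpoint
		p2p.RouterOptions{},
	)
	if err != nil {
		cancel()
		return nil, nil, err
	}
	if err := router.Start(ctx); err != nil {
		cancel()
		return nil, nil, err
	}
	return router, cancel, nil
}
```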
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}) local := network.RandomNode() peers := network.Peers(local.NodeID) - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) network.Start(t) @@ -109,35 +112,38 @@ func TestRouter_Channel_Basic(t *testing.T) { selfKey, peerManager, nil, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) - t.Cleanup(func() { - require.NoError(t, router.Stop()) - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, router.Start(ctx)) + t.Cleanup(router.Wait) // Opening a channel should work. - channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err := router.OpenChannel(chDesc) require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc.ID) + require.Contains(t, router.NodeInfo().Channels, byte(chDesc.ID)) // Opening the same channel again should fail. - _, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + _, err = router.OpenChannel(chDesc) require.Error(t, err) // Opening a different channel should work. - chDesc2 := p2p.ChannelDescriptor{ID: byte(2)} - _, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0) + chDesc2 := &p2p.ChannelDescriptor{ID: 2, MessageType: &p2ptest.Message{}} + _, err = router.OpenChannel(chDesc2) + require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc2.ID) + require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID)) // Closing the channel, then opening it again should be fine. channel.Close() time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async... - channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err = router.OpenChannel(chDesc) require.NoError(t, err) // We should be able to send on the channel, even though there are no peers. @@ -156,16 +162,19 @@ func TestRouter_Channel_Basic(t *testing.T) { // Channel tests are hairy to mock, so we use an in-memory network instead. func TestRouter_Channel_SendReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a, b, c := channels[aID], channels[bID], channels[cID] - otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9), &p2ptest.Message{}, 0) + otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9)) network.Start(t) @@ -217,12 +226,15 @@ func TestRouter_Channel_SendReceive(t *testing.T) { func TestRouter_Channel_Broadcast(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. 
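`OpenChannel` loses its separate message-type and buffer-size parameters; the `ChannelDescriptor` now carries the message prototype and queue sizes itself. A sketch assembled from the descriptor fields that appear in these test diffs (the function wrapper and return types follow the tests' usage and are otherwise illustrative):

```go
package p2pexample

import (
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
)

// openChannel sketches the single-argument OpenChannel: the
// descriptor itself carries the prototype message (cloned to decode
// inbound bytes) and the queue capacities.
func openChannel(router *p2p.Router) (*p2p.Channel, error) {
	chDesc := &p2p.ChannelDescriptor{
		ID:                  2,
		MessageType:         &p2ptest.Message{}, // decode prototype
		Priority:            5,
		SendQueueCapacity:   10,
		RecvMessageCapacity: 10,
	}
	// Opening the same ID twice fails; Close() then reopen succeeds.
	return router.OpenChannel(chDesc)
}
```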
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}) ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a, b, c, d := channels[aID], channels[bID], channels[cID], channels[dID] network.Start(t) @@ -245,12 +257,23 @@ func TestRouter_Channel_Broadcast(t *testing.T) { func TestRouter_Channel_Wrapper(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 2}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 2}) ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &wrapperMessage{}, 0) + chDesc := &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &wrapperMessage{}, + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: 10, + } + + channels := network.MakeChannels(t, chDesc) a, b := channels[aID], channels[bID] network.Start(t) @@ -304,17 +327,20 @@ func (w *wrapperMessage) Unwrap() (proto.Message, error) { func TestRouter_Channel_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) network.Start(t) ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a := channels[aID] // Erroring b should cause it to be disconnected. It will reconnect shortly after. - sub := network.Nodes[aID].MakePeerUpdates(t) + sub := network.Nodes[aID].MakePeerUpdates(ctx, t) p2ptest.RequireError(t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) p2ptest.RequireUpdates(t, sub, []p2p.PeerUpdate{ {NodeID: bID, Status: p2p.PeerStatusDown}, @@ -343,9 +369,16 @@ func TestRouter_AcceptPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Set up a mock transport that handshakes. @@ -367,7 +400,7 @@ func TestRouter_AcceptPeers(t *testing.T) { mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. 
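A recurring mock change in these tests is the terminal `Accept` expectation moving from `Once()` to `Maybe()`. With testify's `mock` package, `Once()` makes a call required — an unmet expectation fails `AssertExpectations` — while `Maybe()` makes it permitted but optional. That matters now that a canceled context can stop the accept loop before it re-enters `Accept`. An annotated restatement of the two expectations from the hunks above:

```go
// Required: the first Accept must yield the mocked connection.
mockTransport.On("Accept").Once().Return(mockConnection, nil)

// Optional: a second Accept may return io.EOF to end the loop, but
// the test no longer fails if shutdown wins the race and the router
// never calls Accept again.
mockTransport.On("Accept").Maybe().Return(nil, io.EOF)
```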
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -384,10 +417,11 @@ func TestRouter_AcceptPeers(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -416,6 +450,9 @@ func TestRouter_AcceptPeers(t *testing.T) { func TestRouter_AcceptPeers_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns an error, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} @@ -436,11 +473,12 @@ func TestRouter_AcceptPeers_Error(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -470,11 +508,15 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -484,6 +526,9 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns a connection that blocks during the // handshake. It should be able to accept several of these in parallel, i.e. // a single connection can't halt other connections being accepted. 
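The head-of-line-blocking test pins down a structural property of the accept path: each accepted connection is handed off to its own goroutine, so a connection stalled in its handshake cannot prevent further `Accept` calls. A minimal sketch of an accept loop with that property (names are illustrative):

```go
package p2pexample

import (
	"context"
	"net"
)

// acceptLoop hands each connection to its own goroutine. A handler
// that blocks (e.g. in a slow handshake) delays only its own
// connection; the loop immediately returns to Accept.
func acceptLoop(ctx context.Context, accept func() (net.Conn, error), handle func(context.Context, net.Conn)) {
	for {
		conn, err := accept()
		if err != nil {
			return // e.g. io.EOF once the transport closes
		}
		go handle(ctx, conn)
	}
}
```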
@@ -518,10 +563,11 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { return len(acceptCh) == 3 @@ -560,10 +606,16 @@ func TestRouter_DialPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(bctx) + defer cancel() address := p2p.NodeAddress{Protocol: "mock", NodeID: tc.dialID} endpoint := p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} @@ -617,10 +669,11 @@ func TestRouter_DialPeers(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -649,6 +702,9 @@ func TestRouter_DialPeers(t *testing.T) { func TestRouter_DialPeers_Parallel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -700,6 +756,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{ DialSleep: func(_ context.Context) {}, NumConcurrentDials: func() int { @@ -713,7 +770,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { @@ -734,6 +791,9 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { func TestRouter_EvictPeers(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that we can evict. closeCh := make(chan time.Time) closeOnce := sync.Once{} @@ -755,7 +815,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -772,10 +832,11 @@ func TestRouter_EvictPeers(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) // Wait for the mock peer to connect, then evict it by reporting an error. 
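Both table-driven tests here adopt the same two-level context layout: one base context (`bctx`) spanning the whole table, and a per-subtest context derived from it. A runnable sketch of the pattern in isolation:

```go
package p2pexample

import (
	"context"
	"testing"
)

// TestDerivedContexts sketches the bctx/ctx layout: canceling bcancel
// tears down every subtest at once, while each subtest's own cancel
// reliably stops the goroutines it started before leaktest runs.
func TestDerivedContexts(t *testing.T) {
	bctx, bcancel := context.WithCancel(context.Background())
	defer bcancel()

	for name, tc := range map[string]struct{ ok bool }{"up": {true}, "down": {false}} {
		tc := tc // capture the range variable (needed before Go 1.22)
		t.Run(name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(bctx)
			defer cancel()

			_ = tc // start the router and mocks with ctx here
			_ = ctx
		})
	}
}
```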
p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -798,6 +859,8 @@ func TestRouter_EvictPeers(t *testing.T) { func TestRouter_ChannelCompatability(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() incompatiblePeer := types.NodeInfo{ NodeID: peerID, @@ -833,10 +896,11 @@ func TestRouter_ChannelCompatability(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(1 * time.Second) require.NoError(t, router.Stop()) require.Empty(t, peerManager.Peers()) @@ -847,6 +911,8 @@ func TestRouter_ChannelCompatability(t *testing.T) { func TestRouter_DontSendOnInvalidChannel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() peer := types.NodeInfo{ NodeID: peerID, @@ -865,11 +931,12 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) mockTransport := &mocks.Transport{} + mockTransport.On("AddChannelDescriptors", mock.Anything).Return() mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -886,17 +953,18 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ NodeID: peerInfo.NodeID, Status: p2p.PeerStatusUp, }) - channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err := router.OpenChannel(chDesc) require.NoError(t, err) channel.Out <- p2p.Envelope{ diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go deleted file mode 100644 index 07d1ad156..000000000 --- a/internal/p2p/shim.go +++ /dev/null @@ -1,334 +0,0 @@ -package p2p - -import ( - "errors" - "sort" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/libs/log" -) - -// ============================================================================ -// TODO: Types and business logic below are temporary and will be removed once -// the legacy p2p stack is removed in favor of the new model. -// -// ref: https://github.com/tendermint/tendermint/issues/5670 -// ============================================================================ - -var _ Reactor = (*ReactorShim)(nil) - -type ( - messageValidator interface { - Validate() error - } - - // ReactorShim defines a generic shim wrapper around a BaseReactor. It is - // responsible for wiring up legacy p2p behavior to the new p2p semantics - // (e.g. proxying Envelope messages to legacy peers). - ReactorShim struct { - BaseReactor - - Name string - PeerUpdates *PeerUpdates - Channels map[ChannelID]*ChannelShim - } - - // ChannelShim defines a generic shim wrapper around a legacy p2p channel - // and the new p2p Channel. It also includes the raw bi-directional Go channels - // so we can proxy message delivery. 
- ChannelShim struct { - Descriptor *ChannelDescriptor - Channel *Channel - inCh chan<- Envelope - outCh <-chan Envelope - errCh <-chan PeerError - } - - // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel - // and the proto.Message the new p2p Channel is responsible for handling. - // A ChannelDescriptorShim is not contained in ReactorShim, but is rather - // used to construct a ReactorShim. - ChannelDescriptorShim struct { - MsgType proto.Message - Descriptor *ChannelDescriptor - } -) - -func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { - channels := make(map[ChannelID]*ChannelShim) - - for _, cds := range descriptors { - chShim := NewChannelShim(cds, 0) - channels[chShim.Channel.ID] = chShim - } - - rs := &ReactorShim{ - Name: name, - PeerUpdates: NewPeerUpdates(make(chan PeerUpdate), 0), - Channels: channels, - } - - rs.BaseReactor = *NewBaseReactor(name, rs) - rs.SetLogger(logger) - - return rs -} - -func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { - inCh := make(chan Envelope, buf) - outCh := make(chan Envelope, buf) - errCh := make(chan PeerError, buf) - return &ChannelShim{ - Descriptor: cds.Descriptor, - Channel: NewChannel( - ChannelID(cds.Descriptor.ID), - cds.MsgType, - inCh, - outCh, - errCh, - ), - inCh: inCh, - outCh: outCh, - errCh: errCh, - } -} - -// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate -// go-routine where we listen for outbound envelopes sent during Receive -// executions (or anything else that may send on the Channel) and proxy them to -// the corresponding Peer using the To field from the envelope. -func (rs *ReactorShim) proxyPeerEnvelopes() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for e := range cs.outCh { - msg := proto.Clone(cs.Channel.messageType) - msg.Reset() - - wrapper, ok := msg.(Wrapper) - if ok { - if err := wrapper.Wrap(e.Message); err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to wrap message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - } else { - msg = e.Message - } - - bz, err := proto.Marshal(msg) - if err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to encode message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - - switch { - case e.Broadcast: - rs.Switch.Broadcast(cs.Descriptor.ID, bz) - - case e.To != "": - src := rs.Switch.peers.Get(e.To) - if src == nil { - rs.Logger.Debug( - "failed to proxy envelope; failed to find peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - continue - } - - if !src.Send(cs.Descriptor.ID, bz) { - // This usually happens when we try to send across a channel - // that the peer doesn't have open. To avoid bloating the - // logs we set this to be Debug - rs.Logger.Debug( - "failed to proxy message to peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - } - - default: - rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID) - } - } - }(cs) - } -} - -// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine -// where we listen for peer errors. For each peer error, we find the peer from -// the legacy p2p Switch and execute a StopPeerForError call with the corresponding -// peer error. 
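The shim's send path (being deleted here) hinged on a clone-wrap-marshal sequence so reactors could speak typed sub-messages over a channel whose wire type is a wrapper. The sequence, condensed out of `proxyPeerEnvelopes` with the Switch plumbing removed — function name and packaging are illustrative:

```go
package p2pexample

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/tendermint/tendermint/internal/p2p"
)

// encodeOutbound clones the channel's prototype message, wraps the
// payload when the prototype implements p2p.Wrapper, and marshals the
// result for the wire.
func encodeOutbound(prototype, payload proto.Message) ([]byte, error) {
	msg := proto.Clone(prototype)
	msg.Reset()

	if wrapper, ok := msg.(p2p.Wrapper); ok {
		if err := wrapper.Wrap(payload); err != nil {
			return nil, fmt.Errorf("failed to wrap message: %w", err)
		}
	} else {
		msg = payload // unwrapped channels send the payload directly
	}
	return proto.Marshal(msg)
}
```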
-func (rs *ReactorShim) handlePeerErrors() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for pErr := range cs.errCh { - if pErr.NodeID != "" { - peer := rs.Switch.peers.Get(pErr.NodeID) - if peer == nil { - rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID) - continue - } - - rs.Switch.StopPeerForError(peer, pErr.Err) - } - } - }(cs) - } -} - -// OnStart executes the reactor shim's OnStart hook where we start all the -// necessary go-routines in order to proxy peer envelopes and errors per p2p -// Channel. -func (rs *ReactorShim) OnStart() error { - if rs.Switch == nil { - return errors.New("proxyPeerEnvelopes: reactor shim switch is nil") - } - - // start envelope proxying and peer error handling in separate go routines - rs.proxyPeerEnvelopes() - rs.handlePeerErrors() - - return nil -} - -// GetChannel returns a p2p Channel reference for a given ChannelID. If no -// Channel exists, nil is returned. -func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { - channelShim, ok := rs.Channels[cID] - if ok { - return channelShim.Channel - } - - return nil -} - -// GetChannels implements the legacy Reactor interface for getting a slice of all -// the supported ChannelDescriptors. -func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { - sortedChIDs := make([]ChannelID, 0, len(rs.Channels)) - for cID := range rs.Channels { - sortedChIDs = append(sortedChIDs, cID) - } - - sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] }) - - descriptors := make([]*ChannelDescriptor, len(rs.Channels)) - for i, cID := range sortedChIDs { - descriptors[i] = rs.Channels[cID].Descriptor - } - - return descriptors -} - -// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle adding a peer. -func (rs *ReactorShim) AddPeer(peer Peer) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}: - rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle removing a peer. -func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}: - rs.Logger.Debug( - "sent peer update", - "reactor", rs.Name, - "peer", peer.ID(), - "reason", reason, - "status", PeerStatusDown, - ) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. 
- // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// Receive implements a generic wrapper around implementing the Receive method -// on the legacy Reactor p2p interface. If the reactor is running, Receive will -// find the corresponding new p2p Channel, create and decode the appropriate -// proto.Message from the msgBytes, execute any validation and finally construct -// and send a p2p Envelope on the appropriate p2p Channel. -func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) { - if !rs.IsRunning() { - return - } - - cID := ChannelID(chID) - channelShim, ok := rs.Channels[cID] - if !ok { - rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID) - return - } - - msg := proto.Clone(channelShim.Channel.messageType) - msg.Reset() - - if err := proto.Unmarshal(msgBytes, msg); err != nil { - rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - - validator, ok := msg.(messageValidator) - if ok { - if err := validator.Validate(); err != nil { - rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - } - - wrapper, ok := msg.(Wrapper) - if ok { - var err error - - msg, err = wrapper.Unwrap() - if err != nil { - rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err) - return - } - } - - select { - case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}: - rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID()) - - case <-channelShim.Channel.Done(): - // NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the inbound channel and when the reactor stops we - // do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the inbound channel when closing or - // stopping. 
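The matching receive path (`ReactorShim.Receive`, deleted just above) runs the inverse pipeline: unmarshal into a cloned prototype, apply optional validation, then unwrap. Condensed the same way:

```go
package p2pexample

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/tendermint/tendermint/internal/p2p"
)

// decodeInbound unmarshals wire bytes into a clone of the prototype,
// runs Validate() when the message implements it, and unwraps wrapper
// types back to the inner payload.
func decodeInbound(prototype proto.Message, bz []byte) (proto.Message, error) {
	msg := proto.Clone(prototype)
	msg.Reset()

	if err := proto.Unmarshal(bz, msg); err != nil {
		return nil, fmt.Errorf("error decoding message: %w", err)
	}
	if v, ok := msg.(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return nil, fmt.Errorf("invalid message: %w", err)
		}
	}
	if wrapper, ok := msg.(p2p.Wrapper); ok {
		inner, err := wrapper.Unwrap()
		if err != nil {
			return nil, fmt.Errorf("failed to unwrap message: %w", err)
		}
		msg = inner
	}
	return msg, nil
}
```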
- } -} diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go deleted file mode 100644 index d8b9e30c3..000000000 --- a/internal/p2p/shim_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package p2p_test - -import ( - "sync" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks" - "github.com/tendermint/tendermint/libs/log" - ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" -) - -var ( - channelID1 = byte(0x01) - channelID2 = byte(0x02) - - p2pCfg = config.DefaultP2PConfig() - - testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - p2p.ChannelID(channelID1): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID1, - Priority: 3, - SendQueueCapacity: 10, - RecvMessageCapacity: int(4e6), - }, - }, - p2p.ChannelID(channelID2): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID2, - Priority: 1, - SendQueueCapacity: 4, - RecvMessageCapacity: int(16e6), - }, - }, - } -) - -type reactorShimTestSuite struct { - shim *p2p.ReactorShim - sw *p2p.Switch -} - -func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { - t.Helper() - - rts := &reactorShimTestSuite{ - shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims), - } - - rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch { - for _, peer := range peers { - p2p.AddPeerToSwitchPeerSet(sw, peer) - } - - sw.AddReactor(rts.shim.Name, rts.shim) - return sw - }, log.TestingLogger()) - - // start the reactor shim - require.NoError(t, rts.shim.Start()) - - t.Cleanup(func() { - require.NoError(t, rts.shim.Stop()) - - for _, chs := range rts.shim.Channels { - chs.Channel.Close() - } - }) - - return rts -} - -func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) { - t.Helper() - - peerID := types.NodeID(id) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(peerID) - - return peer, peerID -} - -func TestReactorShim_GetChannel(t *testing.T) { - rts := setup(t, nil) - - p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1)) - require.NotNil(t, p2pCh) - require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1)) - - p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03))) - require.Nil(t, p2pCh) -} - -func TestReactorShim_GetChannels(t *testing.T) { - rts := setup(t, nil) - - p2pChs := rts.shim.GetChannels() - require.Len(t, p2pChs, 2) - require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1)) - require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2)) -} - -func TestReactorShim_AddPeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.AddPeer(peerA) - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status) -} - -func TestReactorShim_RemovePeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - 
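The repeated NOTE in the shim's `AddPeer`, `RemovePeer`, and `Receive` (all deleted above) encodes a general concurrency rule: a channel with many concurrent senders must never be closed by the receiving side, or a straggling send panics; termination is signaled out-of-band instead. The select shape, isolated into a sketch with stand-in types:

```go
package p2pexample

// peerUpdate stands in for the shim's PeerUpdate payload.
type peerUpdate struct {
	NodeID string
	Status string
}

// notify delivers an update unless the subscription has ended. The
// updates channel is deliberately never closed here: with many
// concurrent senders, a close would race with in-flight sends and
// panic, so done carries the termination signal instead.
func notify(updates chan<- peerUpdate, done <-chan struct{}, u peerUpdate) {
	select {
	case updates <- u:
	case <-done:
	}
}
```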
rts.shim.RemovePeer(peerA, "test reason") - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status) -} - -func TestReactorShim_Receive(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - msg := &ssproto.Message{ - Sum: &ssproto.Message_ChunkRequest{ - ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, - }, - } - - bz, err := proto.Marshal(msg) - require.NoError(t, err) - - var wg sync.WaitGroup - - var response *ssproto.Message - peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) { - m := &ssproto.Message{} - require.NoError(t, proto.Unmarshal(args[1].([]byte), m)) - - response = m - wg.Done() - }).Return(true) - - p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)] - - wg.Add(2) - - // Simulate receiving the envelope in some real reactor and replying back with - // the same envelope and then closing the Channel. - go func() { - e := <-p2pCh.Channel.In - require.Equal(t, peerIDA, e.From) - require.NotNil(t, e.Message) - - p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message} - p2pCh.Channel.Close() - wg.Done() - }() - - rts.shim.Receive(channelID1, peerA, bz) - - // wait until the mock peer called Send and we (fake) proxied the envelope - wg.Wait() - require.NotNil(t, response) - - m, err := response.Unwrap() - require.NoError(t, err) - require.Equal(t, msg.GetChunkRequest(), m) - - // Since p2pCh was closed in the simulated reactor above, calling Receive - // should not block. - rts.shim.Receive(channelID1, peerA, bz) - require.Empty(t, p2pCh.Channel.In) - - peerA.AssertExpectations(t) -} diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go deleted file mode 100644 index eeb93a994..000000000 --- a/internal/p2p/switch.go +++ /dev/null @@ -1,1065 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - "io" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS - dialRandomizerIntervalMilliseconds = 3000 - - // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s - reconnectAttempts = 20 - reconnectInterval = 5 * time.Second - - // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs - reconnectBackOffAttempts = 10 - reconnectBackOffBaseSeconds = 3 - - defaultFilterTimeout = 5 * time.Second -) - -// MConnConfig returns an MConnConfig with fields updated -// from the P2PConfig. -func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = cfg.FlushThrottleTimeout - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - return mConfig -} - -//----------------------------------------------------------------------------- - -// An AddrBook represents an address book from the pex package, which is used -// to store peer addresses. 
-type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddPrivateIDs([]string) - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(types.NodeID) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool - Save() -} - -// ConnFilterFunc is a callback for connection filtering. If it returns an -// error, the connection is rejected. The set of existing connections is passed -// along with the new connection and all resolved IPs. -type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error - -// PeerFilterFunc to be implemented by filter hooks after a new Peer has been -// fully setup. -type PeerFilterFunc func(IPeerSet, Peer) error - -// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection -// and refuses new ones if they come from a known ip. -var ConnDuplicateIPFilter ConnFilterFunc = func(cs ConnSet, c net.Conn, ips []net.IP) error { - for _, ip := range ips { - if cs.HasIP(ip) { - return ErrRejected{ - conn: c, - err: fmt.Errorf("ip<%v> already connected", ip), - isDuplicate: true, - } - } - } - return nil -} - -//----------------------------------------------------------------------------- - -// Switch handles peer connections and exposes an API to receive incoming messages -// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -// or more `Channels`. So while sending outgoing messages is typically performed on the peer, -// incoming messages are received on the reactor. -type Switch struct { - service.BaseService - - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - reconnecting *cmap.CMap - nodeInfo types.NodeInfo // our node info - nodeKey types.NodeKey // our node privkey - addrBook AddrBook - // peers addresses with whom we'll maintain constant connection - persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[types.NodeID]struct{} - - transport Transport - - filterTimeout time.Duration - peerFilters []PeerFilterFunc - connFilters []ConnFilterFunc - conns ConnSet - - metrics *Metrics -} - -// NetAddress returns the first address the switch is listening on, -// or nil if no addresses are found. -func (sw *Switch) NetAddress() *NetAddress { - endpoints := sw.transport.Endpoints() - if len(endpoints) == 0 { - return nil - } - return &NetAddress{ - ID: sw.nodeInfo.NodeID, - IP: endpoints[0].IP, - Port: endpoints[0].Port, - } -} - -// SwitchOption sets an optional parameter on the Switch. -type SwitchOption func(*Switch) - -// NewSwitch creates a new Switch with the given config. -func NewSwitch( - cfg *config.P2PConfig, - transport Transport, - options ...SwitchOption, -) *Switch { - sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmap.NewCMap(), - reconnecting: cmap.NewCMap(), - metrics: NopMetrics(), - transport: transport, - persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[types.NodeID]struct{}), - filterTimeout: defaultFilterTimeout, - conns: NewConnSet(), - } - - // Ensure PRNG is reseeded. - tmrand.Reseed() - - sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw) - - for _, option := range options { - option(sw) - } - - return sw -} - -// SwitchFilterTimeout sets the timeout used for peer filters. 
-func SwitchFilterTimeout(timeout time.Duration) SwitchOption { - return func(sw *Switch) { sw.filterTimeout = timeout } -} - -// SwitchPeerFilters sets the filters for rejection of new peers. -func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption { - return func(sw *Switch) { sw.peerFilters = filters } -} - -// SwitchConnFilters sets the filters for rejection of connections. -func SwitchConnFilters(filters ...ConnFilterFunc) SwitchOption { - return func(sw *Switch) { sw.connFilters = filters } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) SwitchOption { - return func(sw *Switch) { sw.metrics = metrics } -} - -//--------------------------------------------------------------------- -// Switch setup - -// AddReactor adds the given reactor to the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - for _, chDesc := range reactor.GetChannels() { - chID := chDesc.ID - // No two reactors can share the same channel. - if sw.reactorsByCh[chID] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// RemoveReactor removes the given Reactor from the Switch. -// NOTE: Not goroutine safe. -func (sw *Switch) RemoveReactor(name string, reactor Reactor) { - for _, chDesc := range reactor.GetChannels() { - // remove channel description - for i := 0; i < len(sw.chDescs); i++ { - if chDesc.ID == sw.chDescs[i].ID { - sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) - break - } - } - delete(sw.reactorsByCh, chDesc.ID) - } - delete(sw.reactors, name) - reactor.SetSwitch(nil) -} - -// Reactors returns a map of reactors registered on the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Reactor returns the reactor with the given name. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// NodeInfo returns the switch's NodeInfo. -// NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() types.NodeInfo { - return sw.nodeInfo -} - -// SetNodeKey sets the switch's private key for authenticated encryption. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) { - sw.nodeKey = nodeKey -} - -//--------------------------------------------------------------------- -// Service start/stop - -// OnStart implements BaseService. It starts all the reactors and peers. -func (sw *Switch) OnStart() error { - - // FIXME: Temporary hack to pass channel descriptors to MConn transport, - // since they are not available when it is constructed. This will be - // fixed when we implement the new router abstraction. - if t, ok := sw.transport.(*MConnTransport); ok { - t.channelDescs = sw.chDescs - } - - // Start reactors - for _, reactor := range sw.reactors { - err := reactor.Start() - if err != nil { - return fmt.Errorf("failed to start %v: %w", reactor, err) - } - } - - // Start accepting Peers. - go sw.acceptRoutine() - - return nil -} - -// OnStop implements BaseService. It stops all peers and reactors. 
-func (sw *Switch) OnStop() { - // Stop peers - for _, p := range sw.peers.List() { - sw.stopAndRemovePeer(p, nil) - } - - // Stop reactors - sw.Logger.Debug("Switch: Stopping reactors") - for _, reactor := range sw.reactors { - if err := reactor.Stop(); err != nil { - sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err) - } - } -} - -//--------------------------------------------------------------------- -// Peers - -// Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). -// -// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", msgBytes) - - peers := sw.peers.List() - var wg sync.WaitGroup - wg.Add(len(peers)) - successChan := make(chan bool, len(peers)) - - for _, peer := range peers { - go func(p Peer) { - defer wg.Done() - success := p.Send(chID, msgBytes) - successChan <- success - }(peer) - } - - go func() { - wg.Wait() - close(successChan) - }() - - return successChan -} - -// NumPeers returns the count of outbound/inbound and outbound-dialing peers. -// unconditional peers are not counted here. -func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - if !sw.IsPeerUnconditional(peer.ID()) { - outbound++ - } - } else { - if !sw.IsPeerUnconditional(peer.ID()) { - inbound++ - } - } - } - dialing = sw.dialing.Size() - return -} - -func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool { - _, ok := sw.unconditionalPeerIDs[id] - return ok -} - -// MaxNumOutboundPeers returns a maximum number of outbound peers. -func (sw *Switch) MaxNumOutboundPeers() int { - return sw.config.MaxNumOutboundPeers -} - -// Peers returns the set of peers that are connected to the switch. -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// StopPeerForError disconnects from a peer due to external error. -// If the peer is persistent, it will attempt to reconnect. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - if !peer.IsRunning() { - return - } - - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } - } - go sw.reconnectToPeer(addr) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. 
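The deleted `Switch.Broadcast` is a textbook fan-out: one goroutine per peer, a result channel buffered to the peer count so senders never block, and a closer goroutine gated on a `WaitGroup`. A self-contained sketch with a minimal stand-in for the `Peer` interface:

```go
package p2pexample

import "sync"

// sender is a minimal stand-in for the deleted Peer interface.
type sender interface {
	Send(chID byte, msg []byte) bool
}

// broadcast sends msg to every peer concurrently and returns a
// channel of per-peer results; the channel is closed once all sends
// have completed.
func broadcast(peers []sender, chID byte, msg []byte) <-chan bool {
	var wg sync.WaitGroup
	results := make(chan bool, len(peers)) // buffered: senders never block

	wg.Add(len(peers))
	for _, p := range peers {
		go func(p sender) {
			defer wg.Done()
			results <- p.Send(chID, msg)
		}(p)
	}

	go func() {
		wg.Wait()
		close(results)
	}()
	return results
}
```

As the original doc comment notes, running each send in its own goroutine means broadcast order across peers is not preserved.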
-func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly - } - - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } - - // Removing a peer should go last to avoid a situation where a peer - // reconnect to our node and the switch calls InitPeer before - // RemovePeer is finished. - // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } - - sw.conns.RemoveAddr(peer.RemoteAddr()) -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if sw.reconnecting.Has(string(addr.ID)) { - return - } - sw.reconnecting.Set(string(addr.ID), addr) - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.ID()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -type privateAddr interface { - PrivateAddr() bool -} - -func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() -} - -// DialPeersAsync dials a list of peers asynchronously in random order. -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). 
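The deleted `reconnectToPeer` runs a two-phase schedule: `reconnectAttempts` tries at a fixed `reconnectInterval` (5 s × 20 ≈ 100 s), then `reconnectBackOffAttempts` tries behind exponentially growing sleeps of 3⁰ … 3⁹ seconds. The skeleton, with the random jitter added by `randomSleep` omitted for brevity (constants are from the deleted file's header):

```go
package p2pexample

import (
	"math"
	"time"
)

const (
	reconnectAttempts           = 20              // fixed-interval phase
	reconnectInterval           = 5 * time.Second // ~100s total
	reconnectBackOffAttempts    = 10              // exponential phase
	reconnectBackOffBaseSeconds = 3
)

// reconnect sketches the deleted two-phase retry schedule. dial
// should return true on success; stillRunning lets the caller abort
// the loop on shutdown, as the original checked sw.IsRunning().
func reconnect(dial func() bool, stillRunning func() bool) bool {
	for i := 0; i < reconnectAttempts; i++ {
		if !stillRunning() {
			return false
		}
		if dial() {
			return true
		}
		time.Sleep(reconnectInterval)
	}
	for i := 0; i < reconnectBackOffAttempts; i++ {
		if !stillRunning() {
			return false
		}
		// sleep an exponentially increasing amount: 3^i seconds
		time.Sleep(time.Duration(math.Pow(reconnectBackOffBaseSeconds, float64(i))) * time.Second)
		if dial() {
			return true
		}
	}
	return false
}
```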
-// It ignores ErrNetAddressLookup. However, if there are other errors, first -// encounter is returned. -// Nop if there are no peers. -func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.dialPeersAsync(netAddrs) - return nil -} - -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() - - // TODO: this code feels like it's in the wrong place. - // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if sw.addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { - if isPrivateAddr(err) { - sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) - } else { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - sw.addrBook.Save() - } - - // permute the list, dial them in random order. - perm := mrand.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - addr := netAddrs[j] - - if addr.Same(ourAddr) { - sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) - return - } - - sw.randomSleep(0) - - err := sw.DialPeerWithAddress(addr) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects -// and authenticates successfully. -// If we're currently dialing this address or it belongs to an existing peer, -// ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { - if sw.IsDialingOrExistingAddress(addr) { - return ErrCurrentlyDialingOrExistingAddress{addr.String()} - } - - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - - return sw.addOutboundPeerWithConfig(addr, sw.config) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - // nolint:gosec // G404: Use of weak random number generator - r := time.Duration(mrand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -// IsDialingOrExistingAddress returns true if switch has a peer with the given -// address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { - return sw.dialing.Has(string(addr.ID)) || - sw.peers.Has(addr.ID) || - (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) -} - -// AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is -// returned. 
-func (sw *Switch) AddPersistentPeers(addrs []string) error { - sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.persistentPeersAddrs = netAddrs - return nil -} - -func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { - sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} - } - return nil -} - -func (sw *Switch) AddPrivatePeerIDs(ids []string) error { - validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - validIDs = append(validIDs, id) - } - - sw.addrBook.AddPrivateIDs(validIDs) - - return nil -} - -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { - for _, pa := range sw.persistentPeersAddrs { - if pa.Equals(na) { - return true - } - } - return false -} - -func (sw *Switch) acceptRoutine() { - for { - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Accept() - if err == nil { - // NOTE: The legacy MConn transport did handshaking in Accept(), - // which was asynchronous and avoided head-of-line-blocking. - // However, as handshakes are being migrated out from the transport, - // we just do it synchronously here for now. - peerNodeInfo, _, err = sw.handshakePeer(c, "") - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if err == io.EOF { - err = ErrTransportClosed{} - } - switch err := err.(type) { - case ErrRejected: - addr := err.Addr() - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - if err.IsIncompatible() { - sw.addrBook.RemoveAddress(&addr) - } - - sw.Logger.Info( - "Inbound Peer rejected", - "err", err, - "numPeers", sw.peers.Size(), - ) - - continue - case ErrFilterTimeout: - sw.Logger.Error( - "Peer filter timed out", - "err", err, - ) - - continue - case ErrTransportClosed: - sw.Logger.Error( - "Stopped accept routine, as transport is closed", - "numPeers", sw.peers.Size(), - ) - default: - sw.Logger.Error( - "Accept on transport errored", - "err", err, - "numPeers", sw.peers.Size(), - ) - // We could instead have a retry loop around the acceptRoutine, - // but that would need to stop and let the node shutdown eventually. - // So might as well panic and let process managers restart the node. - // There's no point in letting the node run without the acceptRoutine, - // since it won't be able to accept new connections. - panic(fmt.Errorf("accept routine exited: %v", err)) - } - - break - } - - isPersistent := false - addr, err := peerNodeInfo.NetAddress() - if err == nil { - isPersistent = sw.IsPeerPersistent(addr) - } - - p := newPeer( - peerNodeInfo, - newPeerConn(false, isPersistent, c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { - // Ignore connection if we already have enough peers. 
- _, in, _ := sw.NumPeers() - if in >= sw.config.MaxNumInboundPeers { - sw.Logger.Info( - "Ignoring inbound connection: already have enough inbound peers", - "address", p.SocketAddr(), - "have", in, - "max", sw.config.MaxNumInboundPeers, - ) - _ = p.CloseConn() - continue - } - - } - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - sw.Logger.Info( - "Ignoring inbound connection: error while adding peer", - "err", err, - "id", p.ID(), - ) - } - } -} - -// dial the peer; make secret connection; authenticate against the dialed ID; -// add the peer. -// if dialing fails, start the reconnect loop. If handshake fails, it's over. -// If peer is started successfully, reconnectLoop will start when -// StopPeerForError is called. -func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, - cfg *config.P2PConfig, -) error { - sw.Logger.Info("Dialing peer", "address", addr) - - // XXX(xla): Remove the leakage of test concerns in implementation. - if cfg.TestDialFail { - go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - // Hardcoded timeout moved from MConn transport during refactoring. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Dial(ctx, Endpoint{ - Protocol: MConnProtocol, - IP: addr.IP, - Port: addr.Port, - }) - if err == nil { - peerNodeInfo, _, err = sw.handshakePeer(c, addr.ID) - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - } - if e.IsIncompatible() { - sw.addrBook.RemoveAddress(addr) - } - - return err - } - - // retry persistent peers after - // any dial error besides IsSelf() - if sw.IsPeerPersistent(addr) { - go sw.reconnectToPeer(addr) - } - - return err - } - - p := newPeer( - peerNodeInfo, - newPeerConn(true, sw.IsPeerPersistent(addr), c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - return err - } - - return nil -} - -func (sw *Switch) handshakePeer( - c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { - // Moved from transport and hardcoded until legacy P2P stack removal. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - isNodeInfoInvalid: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. 
- if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) - if expectPeerID != peerID { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - id: peerID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - peerID, - expectPeerID, - ), - isAuthFailure: true, - } - } - } - - if sw.nodeInfo.ID() == peerInfo.ID() { - return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), - conn: c.(*mConnConnection).conn, - id: peerInfo.ID(), - isSelf: true, - } - } - - if err = sw.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - id: peerInfo.ID(), - isIncompatible: true, - } - } - - return peerInfo, peerKey, nil -} - -func (sw *Switch) filterPeer(p Peer) error { - // Avoid duplicate - if sw.peers.Has(p.ID()) { - return ErrRejected{id: p.ID(), isDuplicate: true} - } - - errc := make(chan error, len(sw.peerFilters)) - - for _, f := range sw.peerFilters { - go func(f PeerFilterFunc, p Peer, errc chan<- error) { - errc <- f(sw.peers, p) - }(f, p, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{id: p.ID(), err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - } - - return nil -} - -// filterConn filters a connection, rejecting it if this function errors. -// -// FIXME: This is only here for compatibility with the current Switch code. In -// the new P2P stack, peer/connection filtering should be moved into the Router -// or PeerManager and removed from here. -func (sw *Switch) filterConn(conn net.Conn) error { - if sw.conns.Has(conn) { - return ErrRejected{conn: conn, isDuplicate: true} - } - - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return err - } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("connection address has invalid IP address %q", host) - } - - // Apply filter callbacks. - chErr := make(chan error, len(sw.connFilters)) - for _, connFilter := range sw.connFilters { - go func(connFilter ConnFilterFunc) { - chErr <- connFilter(sw.conns, conn, []net.IP{ip}) - }(connFilter) - } - - for i := 0; i < cap(chErr); i++ { - select { - case err := <-chErr: - if err != nil { - return ErrRejected{conn: conn, err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - - } - - // FIXME: Doesn't really make sense to set this here, but we preserve the - // behavior from the previous P2P transport implementation. - sw.conns.Set(conn, []net.IP{ip}) - return nil -} - -// addPeer starts up the Peer and adds it to the Switch. Error is returned if -// the peer is filtered out or failed to start or can't be added. -func (sw *Switch) addPeer(p Peer) error { - if err := sw.filterPeer(p); err != nil { - return err - } - - p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) - - // Handle the shut down case where the switch has stopped but we're - // concurrently trying to add a peer. - if !sw.IsRunning() { - // XXX should this return an error or just log and terminate? - sw.Logger.Error("Won't start a peer - switch is not running", "peer", p) - return nil - } - - // Add some data to the peer, which is required by reactors. - for _, reactor := range sw.reactors { - p = reactor.InitPeer(p) - } - - // Start the peer's send/recv routines. 
- // Must start it before adding it to the peer set - // to prevent Start and Stop from being called concurrently. - err := p.Start() - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "err", err, "peer", p) - return err - } - - // Add the peer to PeerSet. Do this before starting the reactors - // so that if Receive errors, we will find the peer and remove it. - // Add should not err since we already checked peers.Has(). - if err := sw.peers.Add(p); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - // Start all the reactor protocols on the peer. - for _, reactor := range sw.reactors { - reactor.AddPeer(p) - } - - sw.Logger.Info("Added peer", "peer", p) - - return nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go deleted file mode 100644 index 8cb755c9f..000000000 --- a/internal/p2p/switch_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig - ctx = context.Background() -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID types.NodeID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx tmsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches 
connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. - switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) - return switches[0], switches[1] -} - -func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) - - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, true)) - - return sw -} - -func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) - t.Cleanup(func() { - if err := s1.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { - t.Error(err) - } - }) - - if s1.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - assertMsgReceivedWithTimeout(t, - ch0Msg, - byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch1Msg, - byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch2Msg, - byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) -} - -func assertMsgReceivedWithTimeout( - t *testing.T, - msgBytes []byte, - channel byte, - reactor *TestReactor, - checkPeriod, - timeout time.Duration, -) { - ticker := time.NewTicker(checkPeriod) - for { - select { - case <-ticker.C: - msgs := reactor.getMsgs(channel) - if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) - } - return - } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) - } - } -} - -func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - - // simulate s1 having a public IP by creating a remote peer with the same ID - rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} - rp.Start() - - // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected self to be rejected") - } - } else { - t.Errorf("expected ErrRejected") - } - } - - assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) - - rp.Stop() - - assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) -} - -func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := s1.NodeInfo() - ni.Network = "network-a" - s1.SetNodeInfo(ni) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - err := s1.DialPeerWithAddress(rp.Addr()) - require.Error(t, err) - errRejected, ok := err.(ErrRejected) - require.True(t, ok, "expected error to be of type IsRejected") - require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible") - - // remote peer should not have been added to the addressbook - require.False(t, s1.addrBook.HasAddress(rp.Addr())) -} - -func TestSwitchPeerFilter(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, - func(_ IPeerSet, _ Peer) error { return nil }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") - } - } else { - t.Errorf("expected ErrRejected") - } -} - -func TestSwitchPeerFilterTimeout(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { - time.Sleep(10 * time.Millisecond) - return nil - }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchFilterTimeout(5*time.Millisecond), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Log(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - 
} - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") - } -} - -func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err := sw.addPeer(p); err != nil { - t.Fatal(err) - } - - err = sw.addPeer(p) - if errRej, ok := err.(ErrRejected); ok { - if !errRej.IsDuplicate() { - t.Errorf("expected peer to be duplicate. got %v", errRej) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) - } -} - -func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - require.Nil(err) - - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection - err = p.CloseConn() - require.NoError(err) - - assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(p.IsRunning()) -} - -func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(promhttp.Handler()) - defer s.Close() - - scrapeMetrics := func() string { - resp, err := http.Get(s.URL) - require.NoError(t, err) - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - peersMetricValue := func() float64 { - matches := re.FindStringSubmatch(scrapeMetrics()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - p2pMetrics := PrometheusMetrics(namespace) - - // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { - // set metrics on sw1 - if i == 0 { - opt := WithMetrics(p2pMetrics) - opt(sw) - } - return initSwitchFunc(i, sw) - }) - - assert.Equal(t, 
len(sw1.Peers().List()), 1) - assert.EqualValues(t, 1, peersMetricValue()) - - // send messages to the peer from sw1 - p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) - - // stop sw2. this should cause the p to fail, - // which results in calling StopPeerForError internally - t.Cleanup(func() { - if err := sw2.Stop(); err != nil { - t.Error(err) - } - }) - - // now call StopPeerForError explicitly, eg. from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) - - assert.Equal(t, len(sw1.Peers().List()), 0) - assert.EqualValues(t, 0, peersMetricValue()) -} - -func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. simulate failure by closing connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() - require.NoError(t, err) - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.False(t, p.IsRunning()) // old peer instance - assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - - // 2. simulate first time dial failure - rp = &remotePeer{ - PrivKey: ed25519.GenPrivKey(), - Config: cfg, - // Use different interface to prevent duplicate IP filter, this will break - // beyond two peers. - listenAddr: "127.0.0.1:0", - } - rp.Start() - defer rp.Stop() - - conf := config.DefaultP2PConfig() - conf.TestDialFail = true // will trigger a reconnect - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) - // DialPeerWithAddres - sw.peerConfig resets the dialer - waitUntilSwitchHasAtLeastNPeers(sw, 2) - assert.Equal(t, 2, sw.Peers().Size()) -} - -func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. 
simulate failure by closing the connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - conn, err := rp.Dial(sw.NetAddress()) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - conn.Close() - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestSwitchDialPeersAsync(t *testing.T) { - if testing.Short() { - return - } - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.DialPeersAsync([]string{rp.Addr().String()}) - require.NoError(t, err) - time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) -} - -func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - has := sw.Peers().Size() - if has >= n { - break - } - } -} - -func TestSwitchFullConnectivity(t *testing.T) { - switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) - defer func() { - for _, sw := range switches { - sw := sw - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - } - }() - - for i, sw := range switches { - if sw.Peers().Size() != 2 { - t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) - } - } -} - -func TestSwitchAcceptRoutine(t *testing.T) { - cfg.MaxNumInboundPeers = 5 - - // Create some unconditional peers. - const unconditionalPeersNum = 2 - var ( - unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) - unconditionalPeerIDs = make([]string, unconditionalPeersNum) - ) - for i := 0; i < unconditionalPeersNum; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - unconditionalPeers[i] = peer - unconditionalPeerIDs[i] = string(peer.ID()) - } - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - require.NoError(t, err) - err = sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - // 0. check there are no peers - assert.Equal(t, 0, sw.Peers().Size()) - - // 1. check we connect up to MaxNumInboundPeers - peers := make([]*remotePeer, 0) - for i := 0; i < cfg.MaxNumInboundPeers; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peers = append(peers, peer) - peer.Start() - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(100 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - - // 2. 
check we close new connections if we already have MaxNumInboundPeers peers - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - conn, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // check conn is closed - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - peer.Stop() - - // 3. check we connect to unconditional peers despite the limit. - for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(10 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) - - for _, peer := range peers { - peer.Stop() - } - for _, peer := range unconditionalPeers { - peer.Stop() - } -} - -func TestSwitchRejectsIncompatiblePeers(t *testing.T) { - sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := sw.NodeInfo() - ni.Network = "network-a" - sw.SetNodeInfo(ni) - - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - assert.Equal(t, 0, sw.Peers().Size()) - - conn, err := rp.Dial(sw.NetAddress()) - assert.Nil(t, err) - - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - - assert.Equal(t, 0, sw.Peers().Size()) -} - -type errorTransport struct { - acceptErr error -} - -func (et errorTransport) String() string { - return "error" -} - -func (et errorTransport) Protocols() []Protocol { - return []Protocol{"error"} -} - -func (et errorTransport) Accept() (Connection, error) { - return nil, et.acceptErr -} -func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) { - panic("not implemented") -} -func (errorTransport) Close() error { panic("not implemented") } -func (errorTransport) FlushClose() error { panic("not implemented") } -func (errorTransport) Endpoints() []Endpoint { panic("not implemented") } - -func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - // TODO(melekes) check we remove our address from addrBook - - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) -} - -// mockReactor checks that InitPeer never called before RemovePeer. If that's -// not true, InitCalledBeforeRemoveFinished will return true. 
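// The two counters below are uint32s accessed only through sync/atomic:
// InitPeer and RemovePeer fire on different goroutines, so a plain bool
// flag would be reported as a data race under -race.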
-type mockReactor struct { - *BaseReactor - - // atomic - removePeerInProgress uint32 - initCalledBeforeRemoveFinished uint32 -} - -func (r *mockReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{{ID: testCh, Priority: 10}} -} - -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { - atomic.StoreUint32(&r.removePeerInProgress, 1) - defer atomic.StoreUint32(&r.removePeerInProgress, 0) - time.Sleep(100 * time.Millisecond) -} - -func (r *mockReactor) InitPeer(peer Peer) Peer { - if atomic.LoadUint32(&r.removePeerInProgress) == 1 { - atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) - } - - return peer -} - -func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { - return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 -} - -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { - // make reactor - reactor := &mockReactor{} - reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { - sw.AddReactor("mock", reactor) - return sw - }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // add peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - - // wait till the switch adds rp to the peer set, then stop the peer asynchronously - for { - time.Sleep(20 * time.Millisecond) - if peer := sw.Peers().Get(rp.ID()); peer != nil { - go sw.StopPeerForError(peer, "test") - break - } - } - - // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - - // make sure reactor.RemovePeer is finished before InitPeer is called - assert.False(t, reactor.InitCalledBeforeRemoveFinished()) -} - -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - - b.Cleanup(func() { - if err := s1.Stop(); err != nil { - b.Error(err) - } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { - b.Error(err) - } - }) - - // Allow time for goroutines to boot up - time.Sleep(1 * time.Second) - - b.ResetTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } - } - - b.Logf("success: %v, failure: %v", numSuccess, numFailure) -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go deleted file mode 100644 index b2851646d..000000000 --- 
a/internal/p2p/test_util.go +++ /dev/null @@ -1,288 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - mrand "math/rand" - "net" - - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p/conn" -) - -const testCh = 0x01 - -//------------------------------------------------ - -func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) //nolint:errcheck // ignore error -} - -func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ - NodeID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - metrics: NopMetrics(), - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - -// nolint:gosec // G404: Use of weak random number generator -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", - tmrand.Bytes(20), - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256) - netAddr, err = types.NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TestHost = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, - n int, - initSwitch func(int, *Switch) *Switch, - connect func([]*Switch, int, int), -) []*Switch { - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch, log.TestingLogger()) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. 
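// MakeConnectedSwitches above invokes this as connect(switches, i, j); the
// function does not return until both sides have finished adding the peer.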
-func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - - p := newPeer( - peerNodeInfo, - pc, - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err = sw.addPeer(p); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. -func StartSwitches(switches []*Switch) error { - for _, s := range switches { - err := s.Start() // start switch and reactors - if err != nil { - return err - } - } - return nil -} - -func MakeSwitch( - cfg *config.P2PConfig, - i int, - network, version string, - initSwitch func(int, *Switch) *Switch, - logger log.Logger, - opts ...SwitchOption, -) *Switch { - - nodeKey := types.GenNodeKey() - nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) - addr, err := types.NewNetAddressString( - nodeKey.ID.AddressString(nodeInfo.ListenAddr), - ) - if err != nil { - panic(err) - } - - swLogger := logger.With("switch", i) - t := NewMConnTransport(swLogger, MConnConfig(cfg), - []*ChannelDescriptor{}, MConnTransportOptions{}) - - // TODO: let the config be passed in? - sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(swLogger) - sw.SetNodeKey(nodeKey) - - if err := t.Listen(NewEndpoint(addr)); err != nil { - panic(err) - } - - ni := nodeInfo - ni.Channels = []byte{} - for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) - } - nodeInfo = ni - - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters. 
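	// Until then, NodeInfo.Channels is rebuilt from reactorsByCh only after
	// the reactors are attached, which is why SetNodeInfo must happen this late.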
- sw.SetNodeInfo(nodeInfo) - - return sw -} - -func testInboundPeerConn( - transport *MConnTransport, - conn net.Conn, -) (peerConn, error) { - return testPeerConn(transport, conn, false, false) -} - -func testPeerConn( - transport *MConnTransport, - rawConn net.Conn, - outbound, persistent bool, -) (pc peerConn, err error) { - - conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs) - - return newPeerConn(outbound, persistent, conn), nil -} - -//---------------------------------------------------------------- -// rand node info - -func testNodeInfo(id types.NodeID, name string) types.NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") -} - -func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo { - return types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: types.NodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - -type AddrBookMock struct { - Addrs map[string]struct{} - OurAddrs map[string]struct{} - PrivateAddrs map[string]struct{} -} - -var _ AddrBook = (*AddrBookMock)(nil) - -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.Addrs[addr.String()] = struct{}{} - return nil -} -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.OurAddrs[addr.String()] - return ok -} -func (book *AddrBookMock) MarkGood(types.NodeID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.Addrs[addr.String()] - return ok -} -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.Addrs, addr.String()) -} -func (book *AddrBookMock) Save() {} -func (book *AddrBookMock) AddPrivateIDs(addrs []string) { - for _, addr := range addrs { - book.PrivateAddrs[addr] = struct{}{} - } -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index a3245dfc8..08de0d3b0 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -7,9 +7,7 @@ import ( "net" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh Transport|Connection @@ -20,19 +18,14 @@ const ( defaultProtocol Protocol = MConnProtocol ) -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, -} - // Protocol identifies a transport protocol. type Protocol string // Transport is a connection-oriented mechanism for exchanging data with a peer. type Transport interface { + // Listen starts the transport on the specified endpoint. + Listen(Endpoint) error + // Protocols returns the protocols supported by the transport. The Router // uses this to pick a transport for an Endpoint. Protocols() []Protocol @@ -54,6 +47,10 @@ type Transport interface { // Close stops accepting new connections, but does not close active connections. 
Close() error + // AddChannelDescriptors is only part of this interface + // temporarily + AddChannelDescriptors([]*ChannelDescriptor) + // Stringer is used to display the transport, e.g. in logs. // // Without this, the logger may use reflection to access and display @@ -91,19 +88,7 @@ type Connection interface { ReceiveMessage() (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - // - // FIXME: For compatibility with the legacy P2P stack, it returns an - // additional boolean false if the message timed out waiting to be accepted - // into the send buffer. This should be removed. - SendMessage(ChannelID, []byte) (bool, error) - - // TrySendMessage is a non-blocking version of SendMessage that returns - // immediately if the message buffer is full. It returns true if the message - // was accepted. - // - // FIXME: This method is here for backwards-compatibility with the legacy - // P2P stack and should be removed. - TrySendMessage(ChannelID, []byte) (bool, error) + SendMessage(ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint @@ -114,18 +99,6 @@ type Connection interface { // Close closes the connection. Close() error - // FlushClose flushes all pending sends and then closes the connection. - // - // FIXME: This only exists for backwards-compatibility with the current - // MConnection implementation. There should really be a separate Flush() - // method, but there is no easy way to synchronously flush pending data with - // the current MConnection code. - FlushClose() error - - // Status returns the current connection status. - // FIXME: Only here for compatibility with the current Peer code. - Status() conn.ConnectionStatus - // Stringer is used to display the connection, e.g. in logs. // // Without this, the logger may use reflection to access and display @@ -156,12 +129,17 @@ type Endpoint struct { } // NewEndpoint constructs an Endpoint from a types.NetAddress structure. -func NewEndpoint(na *types.NetAddress) Endpoint { +func NewEndpoint(addr string) (Endpoint, error) { + ip, port, err := types.ParseAddressString(addr) + if err != nil { + return Endpoint{}, err + } + return Endpoint{ Protocol: MConnProtocol, - IP: na.IP, - Port: na.Port, - } + IP: ip, + Port: port, + }, nil } // NodeAddress converts the endpoint into a NodeAddress for the given node ID. diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index eca261476..0580ce1bf 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -291,7 +291,7 @@ func (c *mConnConnection) Handshake( } c.mconn = mconn c.logger = mconn.Logger - if err = c.mconn.Start(); err != nil { + if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } return peerInfo, peerKey, nil @@ -336,21 +336,21 @@ func (c *mConnConnection) handshake( } mconn := conn.NewMConnectionWithConfig( + c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID)), secretConn, c.channelDescs, c.onReceive, c.onError, c.mConnConfig, ) - mconn.SetLogger(c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID))) return mconn, peerInfo, secretConn.RemotePubKey(), nil } // onReceive is a callback for MConnection received messages. 
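// Selecting on closeCh alongside receiveCh keeps the callback from blocking
// forever: a message arriving mid-shutdown is dropped instead of deadlocking
// the MConnection receive goroutine.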
-func (c *mConnConnection) onReceive(chID byte, payload []byte) { +func (c *mConnConnection) onReceive(chID ChannelID, payload []byte) { select { - case c.receiveCh <- mConnMessage{channelID: ChannelID(chID), payload: payload}: + case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: case <-c.closeCh: } } @@ -377,32 +377,21 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. -func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) + return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: - return false, err + return err case <-c.closeCh: - return false, io.EOF + return io.EOF default: - return c.mconn.Send(byte(chID), msg), nil - } -} + if ok := c.mconn.Send(chID, msg); !ok { + return errors.New("sending message timed out") + } -// TrySendMessage implements Connection. -func (c *mConnConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) - } - select { - case err := <-c.errorCh: - return false, err - case <-c.closeCh: - return false, io.EOF - default: - return c.mconn.TrySend(byte(chID), msg), nil + return nil } } @@ -442,14 +431,6 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint { return endpoint } -// Status implements Connection. -func (c *mConnConnection) Status() conn.ConnectionStatus { - if c.mconn == nil { - return conn.ConnectionStatus{} - } - return c.mconn.Status() -} - // Close implements Connection. func (c *mConnConnection) Close() error { var err error @@ -463,17 +444,3 @@ func (c *mConnConnection) Close() error { }) return err } - -// FlushClose implements Connection. 
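// Its deletion matches the FIXME removed from the Connection interface:
// FlushClose existed only because MConnection cannot synchronously flush
// pending sends, and callers now use plain Close instead.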
-func (c *mConnConnection) FlushClose() error { - var err error - c.closeOnce.Do(func() { - if c.mconn != nil && c.mconn.IsRunning() { - c.mconn.FlushStop() - } else { - err = c.conn.Close() - } - close(c.closeCh) - }) - return err -} diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index 06cd93c0a..4d9a945cb 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "io" "net" "testing" @@ -21,7 +22,7 @@ func init() { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) err := transport.Listen(p2p.Endpoint{ @@ -43,7 +44,7 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -58,10 +59,13 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -124,6 +128,9 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { } func TestMConnTransport_Listen(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testcases := []struct { endpoint p2p.Endpoint ok bool @@ -145,10 +152,13 @@ func TestMConnTransport_Listen(t *testing.T) { t.Run(tc.endpoint.String(), func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel = context.WithCancel(ctx) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) @@ -185,6 +195,9 @@ func TestMConnTransport_Listen(t *testing.T) { go func() { // Dialing the endpoint should work. 
var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerConn, err = transport.Dial(ctx, endpoint) require.NoError(t, err) close(dialedChan) @@ -195,7 +208,6 @@ func TestMConnTransport_Listen(t *testing.T) { _ = conn.Close() <-dialedChan - time.Sleep(time.Minute) // closing the connection should not error require.NoError(t, peerConn.Close()) diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 09a387254..5d9291675 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/crypto" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -118,6 +117,10 @@ func (t *MemoryTransport) String() string { return string(MemoryProtocol) } +func (*MemoryTransport) Listen(Endpoint) error { return nil } + +func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {} + // Protocols implements Transport. func (t *MemoryTransport) Protocols() []Protocol { return []Protocol{MemoryProtocol} @@ -262,11 +265,6 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { } } -// Status implements Connection. -func (c *MemoryConnection) Status() conn.ConnectionStatus { - return conn.ConnectionStatus{} -} - // Handshake implements Connection. func (c *MemoryConnection) Handshake( ctx context.Context, @@ -316,42 +314,21 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { case <-c.closer.Done(): - return false, io.EOF + return io.EOF default: } select { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil + return nil case <-c.closer.Done(): - return false, io.EOF - } -} - -// TrySendMessage implements Connection. -func (c *MemoryConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - // Check close first, since channels are buffered. Otherwise, below select - // may non-deterministically return non-error even when closed. - select { - case <-c.closer.Done(): - return false, io.EOF - default: - } - - select { - case c.sendCh <- memoryMessage{channelID: chID, message: msg}: - c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil - case <-c.closer.Done(): - return false, io.EOF - default: - return false, nil + return io.EOF } } @@ -366,8 +343,3 @@ func (c *MemoryConnection) Close() error { } return nil } - -// FlushClose implements Connection. -func (c *MemoryConnection) FlushClose() error { - return c.Close() -} diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index 1b8ab77f5..a53be251d 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -25,20 +25,26 @@ var testTransports = map[string]transportFactory{} // withTransports is a test helper that runs a test against all transports // registered in testTransports. 
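// A hypothetical caller, sketched against the context-threading signature
// introduced below (the test name and assertion are illustrative only, but
// the shape matches the updated tests in this file):
//
//	func TestTransport_Example(t *testing.T) {
//		ctx, cancel := context.WithCancel(context.Background())
//		defer cancel()
//
//		withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) {
//			a := makeTransport(t)
//			require.NotEmpty(t, a.Protocols())
//		})
//	}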
-func withTransports(t *testing.T, tester func(*testing.T, transportFactory)) { +func withTransports(ctx context.Context, t *testing.T, tester func(context.Context, *testing.T, transportFactory)) { t.Helper() for name, transportFactory := range testTransports { transportFactory := transportFactory t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) - tester(t, transportFactory) + tctx, cancel := context.WithCancel(ctx) + defer cancel() + + tester(tctx, t, transportFactory) }) } } func TestTransport_AcceptClose(t *testing.T) { // Just test accept unblock on close, happy path is tested widely elsewhere. - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) // In-progress Accept should error on concurrent close. @@ -75,7 +81,10 @@ func TestTransport_DialEndpoints(t *testing.T) { {[]byte{1, 2, 3, 4, 5}, false}, } - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) endpoints := a.Endpoints() require.NotEmpty(t, endpoints) @@ -149,8 +158,11 @@ func TestTransport_DialEndpoints(t *testing.T) { } func TestTransport_Dial(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Most just tests dial failures, happy path is tested widely elsewhere. - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -190,7 +202,10 @@ func TestTransport_Dial(t *testing.T) { } func TestTransport_Endpoints(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -214,7 +229,10 @@ func TestTransport_Endpoints(t *testing.T) { } func TestTransport_Protocols(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) protocols := a.Protocols() endpoints := a.Endpoints() @@ -228,17 +246,23 @@ func TestTransport_Protocols(t *testing.T) { } func TestTransport_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) require.NotEmpty(t, a.String()) }) } func TestConnection_Handshake(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) // A handshake should pass the 
given keys and NodeInfo. aKey := ed25519.GenPrivKey() @@ -270,7 +294,10 @@ func TestConnection_Handshake(t *testing.T) { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) } - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } }() peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) @@ -283,12 +310,15 @@ func TestConnection_Handshake(t *testing.T) { } func TestConnection_HandshakeCancel(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) // Handshake should error on context cancellation. - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -298,7 +328,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { _ = ba.Close() // Handshake should error on context timeout. - ab, ba = dialAccept(t, a, b) + ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -310,35 +340,35 @@ func TestConnection_HandshakeCancel(t *testing.T) { } func TestConnection_FlushClose(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) + ab, _ := dialAcceptHandshake(ctx, t, a, b) - // FIXME: FlushClose should be removed (and replaced by separate Flush - // and Close calls if necessary). We can't reliably test it, so we just - // make sure it closes both ends and that it's idempotent. - err := ab.FlushClose() + err := ab.Close() require.NoError(t, err) _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - - err = ab.FlushClose() - require.NoError(t, err) }) } func TestConnection_LocalRemoteEndpoint(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Local and remote connection endpoints correspond to each other. require.NotEmpty(t, ab.LocalEndpoint()) @@ -349,15 +379,17 @@ func TestConnection_LocalRemoteEndpoint(t *testing.T) { } func TestConnection_SendReceive(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Can send and receive a to b. 
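		// Note the signature change exercised here: SendMessage now returns a
		// bare error, the old "accepted" boolean and TrySendMessage having been
		// dropped along with the compatibility FIXMEs on Connection.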
- ok, err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(chID, []byte("foo")) require.NoError(t, err) - require.True(t, ok) ch, msg, err := ba.ReceiveMessage() require.NoError(t, err) @@ -365,30 +397,20 @@ func TestConnection_SendReceive(t *testing.T) { require.Equal(t, chID, ch) // Can send and receive b to a. - _, err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(chID, []byte("bar")) require.NoError(t, err) _, msg, err = ab.ReceiveMessage() require.NoError(t, err) require.Equal(t, []byte("bar"), msg) - // TrySendMessage also works. - ok, err = ba.TrySendMessage(chID, []byte("try")) - require.NoError(t, err) - require.True(t, ok) - - ch, msg, err = ab.ReceiveMessage() - require.NoError(t, err) - require.Equal(t, []byte("try"), msg) - require.Equal(t, chID, ch) - // Connections should still be active after closing the transports. err = a.Close() require.NoError(t, err) err = b.Close() require.NoError(t, err) - _, err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(chID, []byte("still here")) require.NoError(t, err) ch, msg, err = ba.ReceiveMessage() require.NoError(t, err) @@ -403,42 +425,29 @@ func TestConnection_SendReceive(t *testing.T) { _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.TrySendMessage(chID, []byte("closed try")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) _, _, err = ba.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.TrySendMessage(chID, []byte("closed try")) + + err = ba.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.SendMessage(chID, []byte("closed")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - }) -} - -func TestConnection_Status(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { - a := makeTransport(t) - b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) - - // FIXME: This isn't implemented in all transports, so for now we just - // check that it doesn't panic, which isn't really much of a test. - ab.Status() }) } func TestConnection_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAccept(t, a, b) + ab, _ := dialAccept(ctx, t, a, b) require.NotEmpty(t, ab.String()) }) } @@ -585,7 +594,7 @@ func TestEndpoint_Validate(t *testing.T) { // dialAccept is a helper that dials b from a and returns both sides of the // connection. -func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAccept(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() endpoints := b.Endpoints() @@ -618,13 +627,10 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio // dialAcceptHandshake is a helper that dials and handshakes b from a and // returns both sides of the connection. 
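// With the context now passed in by the caller, the helper no longer carves
// out its own one-second handshake timeout; cancellation is entirely the
// caller's responsibility.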
-func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() - ab, ba := dialAccept(t, a, b) - - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() + ab, ba := dialAccept(ctx, t, a, b) errCh := make(chan error, 1) go func() { diff --git a/internal/p2p/trust/metric.go b/internal/p2p/trust/metric.go index aa0ff5298..8ad814f10 100644 --- a/internal/p2p/trust/metric.go +++ b/internal/p2p/trust/metric.go @@ -4,6 +4,7 @@ package trust import ( + "context" "math" "time" @@ -109,8 +110,8 @@ func NewMetricWithConfig(tmc MetricConfig) *Metric { } // OnStart implements Service -func (tm *Metric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { +func (tm *Metric) OnStart(ctx context.Context) error { + if err := tm.BaseService.OnStart(ctx); err != nil { return err } go tm.processRequests() diff --git a/internal/p2p/trust/metric_test.go b/internal/p2p/trust/metric_test.go index 65caf38a2..b7d19da23 100644 --- a/internal/p2p/trust/metric_test.go +++ b/internal/p2p/trust/metric_test.go @@ -1,6 +1,7 @@ package trust import ( + "context" "testing" "time" @@ -9,8 +10,11 @@ import ( ) func TestTrustMetricScores(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tm := NewMetric() - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // Perfect score @@ -27,6 +31,9 @@ func TestTrustMetricScores(t *testing.T) { } func TestTrustMetricConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // 7 days window := time.Minute * 60 * 24 * 7 config := MetricConfig{ @@ -35,7 +42,7 @@ func TestTrustMetricConfig(t *testing.T) { } tm := NewMetricWithConfig(config) - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // The max time intervals should be the TrackingWindow / IntervalLen @@ -52,7 +59,7 @@ func TestTrustMetricConfig(t *testing.T) { config.ProportionalWeight = 0.3 config.IntegralWeight = 0.7 tm = NewMetricWithConfig(config) - err = tm.Start() + err = tm.Start(ctx) require.NoError(t, err) // These weights should be equal to our custom values @@ -74,12 +81,15 @@ func TestTrustMetricCopyNilPointer(t *testing.T) { // XXX: This test fails non-deterministically //nolint:unused,deadcode func _TestTrustMetricStopPause(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // The TestTicker will provide manual control over // the passing of time within the metric tt := NewTestTicker() tm := NewMetric() tm.SetTicker(tt) - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // Allow some time intervals to pass and pause tt.NextTick() diff --git a/internal/p2p/trust/store.go b/internal/p2p/trust/store.go index 9f200b9dd..a01cbab2e 100644 --- a/internal/p2p/trust/store.go +++ b/internal/p2p/trust/store.go @@ -4,6 +4,7 @@ package trust import ( + "context" "encoding/json" "fmt" "time" @@ -11,6 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -38,27 +40,27 @@ type MetricStore struct { // NewTrustMetricStore returns a store that saves data to the DB // and uses the config when creating new trust metrics. 
// Use Start to initialize the trust metric store -func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore { +func NewTrustMetricStore(db dbm.DB, tmc MetricConfig, logger log.Logger) *MetricStore { tms := &MetricStore{ peerMetrics: make(map[string]*Metric), db: db, config: tmc, } - tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms) + tms.BaseService = *service.NewBaseService(logger, "MetricStore", tms) return tms } // OnStart implements Service -func (tms *MetricStore) OnStart() error { - if err := tms.BaseService.OnStart(); err != nil { +func (tms *MetricStore) OnStart(ctx context.Context) error { + if err := tms.BaseService.OnStart(ctx); err != nil { return err } tms.mtx.Lock() defer tms.mtx.Unlock() - tms.loadFromDB() + tms.loadFromDB(ctx) go tms.saveRoutine() return nil } @@ -102,7 +104,7 @@ func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) { } // GetPeerTrustMetric returns a trust metric by peer key -func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { +func (tms *MetricStore) GetPeerTrustMetric(ctx context.Context, key string) *Metric { tms.mtx.Lock() defer tms.mtx.Unlock() @@ -110,7 +112,7 @@ func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { if !ok { // If the metric is not available, we will create it tm = NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { + if err := tm.Start(ctx); err != nil { tms.Logger.Error("unable to start metric store", "error", err) } // The metric needs to be in the map @@ -151,7 +153,7 @@ func (tms *MetricStore) size() int { // Loads the history data for all peers from the store DB // cmn.Panics if file is corrupt -func (tms *MetricStore) loadFromDB() bool { +func (tms *MetricStore) loadFromDB(ctx context.Context) bool { // Obtain the history data we have so far bytes, err := tms.db.Get(trustMetricKey) if err != nil { @@ -172,7 +174,7 @@ for key, p := range peers { tm := NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { + if err := tm.Start(ctx); err != nil { tms.Logger.Error("unable to start metric", "error", err) } tm.Init(p) diff --git a/internal/p2p/trust/store_test.go b/internal/p2p/trust/store_test.go index ecf17dc4a..a6178459f 100644 --- a/internal/p2p/trust/store_test.go +++ b/internal/p2p/trust/store_test.go @@ -4,6 +4,7 @@ package trust import ( + "context" "fmt" "testing" @@ -15,19 +16,21 @@ import ( ) func TestTrustMetricStoreSaveLoad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dir := t.TempDir() + logger := log.TestingLogger() historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) require.NoError(t, err) // 0 peers saved - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) + store := NewTrustMetricStore(historyDB, DefaultConfig(), logger) store.saveToDB() // Load the data from the file - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() + store = NewTrustMetricStore(historyDB, DefaultConfig(), logger) + err = store.Start(ctx) require.NoError(t, err) // Make sure we still have 0 entries assert.Zero(t, store.Size()) @@ -45,7 +48,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { tm := NewMetric() tm.SetTicker(tt[i]) - err = tm.Start() + err = tm.Start(ctx) require.NoError(t, err) store.AddPeerTrustMetric(key, tm) @@ -64,9 +67,9 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { require.NoError(t, err) // Load the data
from the DB - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() + store = NewTrustMetricStore(historyDB, DefaultConfig(), logger) + + err = store.Start(ctx) require.NoError(t, err) // Check that we still have 100 peers with imperfect trust values @@ -80,6 +83,9 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { } func TestTrustMetricStoreConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := dbm.NewDB("", "memdb", "") require.NoError(t, err) @@ -88,14 +94,15 @@ func TestTrustMetricStoreConfig(t *testing.T) { IntegralWeight: 0.5, } + logger := log.TestingLogger() // Create a store with custom config - store := NewTrustMetricStore(historyDB, config) - store.SetLogger(log.TestingLogger()) - err = store.Start() + store := NewTrustMetricStore(historyDB, config, logger) + + err = store.Start(ctx) require.NoError(t, err) // Have the store make us a metric with the config - tm := store.GetPeerTrustMetric("TestKey") + tm := store.GetPeerTrustMetric(ctx, "TestKey") // Check that the options made it to the metric assert.Equal(t, 0.5, tm.proportionalWeight) @@ -105,18 +112,21 @@ func TestTrustMetricStoreConfig(t *testing.T) { } func TestTrustMetricStoreLookup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := dbm.NewDB("", "memdb", "") require.NoError(t, err) - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() + store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger()) + + err = store.Start(ctx) require.NoError(t, err) // Create 100 peers in the trust metric store for i := 0; i < 100; i++ { key := fmt.Sprintf("peer_%d", i) - store.GetPeerTrustMetric(key) + store.GetPeerTrustMetric(ctx, key) // Check that the trust metric was successfully entered ktm := store.peerMetrics[key] @@ -128,16 +138,19 @@ func TestTrustMetricStoreLookup(t *testing.T) { } func TestTrustMetricStorePeerScore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := dbm.NewDB("", "memdb", "") require.NoError(t, err) - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() + store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger()) + + err = store.Start(ctx) require.NoError(t, err) key := "TestKey" - tm := store.GetPeerTrustMetric(key) + tm := store.GetPeerTrustMetric(ctx, key) // This peer is innocent so far first := tm.TrustScore() @@ -156,7 +169,7 @@ func TestTrustMetricStorePeerScore(t *testing.T) { store.PeerDisconnected(key) // We will remember our experiences with this peer - tm = store.GetPeerTrustMetric(key) + tm = store.GetPeerTrustMetric(ctx, key) assert.NotEqual(t, 100, tm.TrustScore()) err = store.Stop() require.NoError(t, err) diff --git a/internal/p2p/types.go b/internal/p2p/types.go index 403f43528..bee99a4fe 100644 --- a/internal/p2p/types.go +++ b/internal/p2p/types.go @@ -5,4 +5,4 @@ import ( ) type ChannelDescriptor = conn.ChannelDescriptor -type ConnectionStatus = conn.ConnectionStatus +type ChannelID = conn.ChannelID diff --git a/internal/p2p/upnp/upnp.go b/internal/p2p/upnp/upnp.go index c00530aca..e2c8f3fcf 100644 --- a/internal/p2p/upnp/upnp.go +++ b/internal/p2p/upnp/upnp.go @@ -10,7 +10,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "strconv" @@ -312,7 +312,7 @@ 
func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { return } var envelope Envelope - data, err := ioutil.ReadAll(response.Body) + data, err := io.ReadAll(response.Body) if err != nil { return } @@ -374,7 +374,7 @@ func (n *upnpNAT) AddPortMapping( // TODO: check response to see if the port was forwarded // log.Println(message, response) // JAE: - // body, err := ioutil.ReadAll(response.Body) + // body, err := io.ReadAll(response.Body) // fmt.Println(string(body), err) mappedExternalPort = externalPort _ = response diff --git a/internal/p2p/wdrr_queue.go b/internal/p2p/wdrr_queue.go deleted file mode 100644 index 1b75ffce8..000000000 --- a/internal/p2p/wdrr_queue.go +++ /dev/null @@ -1,287 +0,0 @@ -package p2p - -import ( - "fmt" - "sort" - "strconv" - - "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -// wrappedEnvelope wraps a p2p Envelope with its precomputed size. -type wrappedEnvelope struct { - envelope Envelope - size uint -} - -// assert the WDRR scheduler implements the queue interface at compile-time -var _ queue = (*wdrrScheduler)(nil) - -// wdrrQueue implements a Weighted Deficit Round Robin (WDRR) scheduling -// algorithm via the queue interface. A WDRR queue is created per peer, where -// the queue will have N number of flows. Each flow corresponds to a p2p Channel, -// so there are n input flows and a single output source, the peer's connection. -// -// The WDRR scheduler contains a shared buffer with a fixed capacity. -// -// Each flow has the following: -// - quantum: The number of bytes that is added to the deficit counter of the -// flow in each round. The flow can send at most quantum bytes at a time. Each -// flow has its own unique quantum, which gives the queue its weighted nature. -// A higher quantum corresponds to a higher weight/priority. The quantum is -// computed as MaxSendBytes * Priority. -// - deficit counter: The number of bytes that the flow is allowed to transmit -// when it is its turn. 
-// -// See: https://en.wikipedia.org/wiki/Deficit_round_robin -type wdrrScheduler struct { - logger log.Logger - metrics *Metrics - chDescs []ChannelDescriptor - capacity uint - size uint - chPriorities map[ChannelID]uint - buffer map[ChannelID][]wrappedEnvelope - quanta map[ChannelID]uint - deficits map[ChannelID]uint - - closer *tmsync.Closer - doneCh *tmsync.Closer - - enqueueCh chan Envelope - dequeueCh chan Envelope -} - -func newWDRRScheduler( - logger log.Logger, - m *Metrics, - chDescs []ChannelDescriptor, - enqueueBuf, dequeueBuf, capacity uint, -) *wdrrScheduler { - - // copy each ChannelDescriptor and sort them by channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) - copy(chDescsCopy, chDescs) - sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority > chDescsCopy[j].Priority }) - - var ( - buffer = make(map[ChannelID][]wrappedEnvelope) - chPriorities = make(map[ChannelID]uint) - quanta = make(map[ChannelID]uint) - deficits = make(map[ChannelID]uint) - ) - - for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) - chPriorities[chID] = uint(chDesc.Priority) - buffer[chID] = make([]wrappedEnvelope, 0) - quanta[chID] = chDesc.MaxSendBytes * uint(chDesc.Priority) - } - - return &wdrrScheduler{ - logger: logger.With("queue", "wdrr"), - metrics: m, - capacity: capacity, - chPriorities: chPriorities, - chDescs: chDescsCopy, - buffer: buffer, - quanta: quanta, - deficits: deficits, - closer: tmsync.NewCloser(), - doneCh: tmsync.NewCloser(), - enqueueCh: make(chan Envelope, enqueueBuf), - dequeueCh: make(chan Envelope, dequeueBuf), - } -} - -// enqueue returns an unbuffered write-only channel which a producer can send on. -func (s *wdrrScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -// dequeue returns an unbuffered read-only channel which a consumer can read from. -func (s *wdrrScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *wdrrScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - -// close closes the WDRR queue. After this call enqueue() will block, so the -// caller must select on closed() as well to avoid blocking forever. The -// enqueue() and dequeue() along with the internal channels will NOT be closed. -// Note, close() will block until all externally spawned goroutines have exited. -func (s *wdrrScheduler) close() { - s.closer.Close() - <-s.doneCh.Done() -} - -// start starts the WDRR queue process in a blocking goroutine. This must be -// called before the queue can start to process and accept Envelopes. -func (s *wdrrScheduler) start() { - go s.process() -} - -// process starts a blocking WDRR scheduler process, where we continuously -// evaluate if we need to attempt to enqueue an Envelope or schedule Envelopes -// to be dequeued and subsequently read and sent on the source connection. -// Internally, each p2p Channel maps to a flow, where each flow has a deficit -// and a quantum. -// -// For each Envelope requested to be enqueued, we evaluate if there is sufficient -// capacity in the shared buffer to add the Envelope. If so, it is added. -// Otherwise, we evaluate all flows of lower priority where we attempt find an -// existing Envelope in the shared buffer of sufficient size that can be dropped -// in place of the incoming Envelope. If there is no such Envelope that can be -// dropped, then the incoming Envelope is dropped. -// -// When there is nothing to be enqueued, we perform the WDRR algorithm and -// determine which Envelopes can be dequeued. 
For each Envelope that can be -// dequeued, it is sent on the dequeueCh. Specifically, for each flow, if it is -// non-empty, its deficit counter is incremented by its quantum value. Then, the -// value of the deficit counter is a maximal amount of bytes that can be sent at -// this round. If the deficit counter is greater than the Envelopes's message -// size at the head of the queue (HoQ), this envelope can be sent and the value -// of the counter is decremented by the message's size. Then, the size of the -// next Envelopes's message is compared to the counter value, etc. Once the flow -// is empty or the value of the counter is insufficient, the scheduler will skip -// to the next flow. If the flow is empty, the value of the deficit counter is -// reset to 0. -// -// XXX/TODO: Evaluate the single goroutine scheduler mechanism. In other words, -// evaluate the effectiveness and performance of having a single goroutine -// perform handling both enqueueing and dequeueing logic. Specifically, there -// is potentially contention between reading off of enqueueCh and trying to -// enqueue while also attempting to perform the WDRR algorithm and find the next -// set of Envelope(s) to send on the dequeueCh. Alternatively, we could consider -// separate scheduling goroutines, but then that requires the use of mutexes and -// possibly a degrading performance. -func (s *wdrrScheduler) process() { - defer s.doneCh.Close() - - for { - select { - case <-s.closer.Done(): - return - - case e := <-s.enqueueCh: - // attempt to enqueue the incoming Envelope - chIDStr := strconv.Itoa(int(e.channelID)) - wEnv := wrappedEnvelope{envelope: e, size: uint(proto.Size(e.Message))} - msgSize := wEnv.size - - s.metrics.PeerPendingSendBytes.With("peer_id", string(e.To)).Add(float64(msgSize)) - - // If we're at capacity, we need to either drop the incoming Envelope or - // an Envelope from a lower priority flow. Otherwise, we add the (wrapped) - // envelope to the flow's queue. - if s.size+wEnv.size > s.capacity { - chPriority := s.chPriorities[e.channelID] - - var ( - canDrop bool - dropIdx int - dropChID ChannelID - ) - - // Evaluate all lower priority flows and determine if there exists an - // Envelope that is of equal or greater size that we can drop in favor - // of the incoming Envelope. - for i := len(s.chDescs) - 1; i >= 0 && uint(s.chDescs[i].Priority) < chPriority && !canDrop; i-- { - currChID := ChannelID(s.chDescs[i].ID) - flow := s.buffer[currChID] - - for j := 0; j < len(flow) && !canDrop; j++ { - if flow[j].size >= wEnv.size { - canDrop = true - dropIdx = j - dropChID = currChID - break - } - } - } - - // If we can drop an existing Envelope, drop it and enqueue the incoming - // Envelope. - if canDrop { - chIDStr = strconv.Itoa(int(dropChID)) - chPriority = s.chPriorities[dropChID] - msgSize = s.buffer[dropChID][dropIdx].size - - // Drop Envelope for the lower priority flow and update the queue's - // buffer size - s.size -= msgSize - s.buffer[dropChID] = append(s.buffer[dropChID][:dropIdx], s.buffer[dropChID][dropIdx+1:]...) - - // add the incoming Envelope and update queue's buffer size - s.size += wEnv.size - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - } - - // We either dropped the incoming Enevelope or one from an existing - // lower priority flow. 
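The deleted doc comments above are the clearest statement of the algorithm being removed. As a compact illustration of the deficit round robin dequeue step they describe, here is a standalone sketch (deliberately simplified; this is not the removed implementation):

package main

import "fmt"

type flow struct {
	quantum uint   // credit added per round (priority-weighted)
	deficit uint   // accumulated credit
	queue   []uint // pending message sizes, head first
}

// round performs one deficit round robin pass: each non-empty flow gains
// quantum bytes of credit and sends head-of-queue messages while its
// deficit covers them; an emptied flow has its deficit reset to zero.
func round(flows []*flow) {
	for i, f := range flows {
		if len(f.queue) == 0 {
			f.deficit = 0
			continue
		}
		f.deficit += f.quantum
		for len(f.queue) > 0 && f.deficit >= f.queue[0] {
			size := f.queue[0]
			f.deficit -= size
			f.queue = f.queue[1:]
			fmt.Printf("flow %d: sent %d bytes (deficit now %d)\n", i, size, f.deficit)
		}
	}
}

func main() {
	flows := []*flow{
		{quantum: 8, queue: []uint{5, 5, 5}}, // higher priority drains faster
		{quantum: 4, queue: []uint{5, 5, 5}}, // lower priority falls behind
	}
	for r := 0; r < 3; r++ {
		round(flows)
	}
}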
- s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", - "ch_id", chIDStr, - "priority", chPriority, - "capacity", s.capacity, - "msg_size", msgSize, - ) - } else { - // we have sufficient capacity to enqueue the incoming Envelope - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.size += wEnv.size - } - - default: - // perform the WDRR algorithm - for _, chDesc := range s.chDescs { - chID := ChannelID(chDesc.ID) - - // only consider non-empty flows - if len(s.buffer[chID]) > 0 { - // bump flow's quantum - s.deficits[chID] += s.quanta[chID] - - // grab the flow's current deficit counter and HoQ (wrapped) Envelope - d := s.deficits[chID] - we := s.buffer[chID][0] - - // While the flow is non-empty and we can send the current Envelope - // on the dequeueCh: - // - // 1. send the Envelope - // 2. update the scheduler's shared buffer's size - // 3. update the flow's deficit - // 4. remove from the flow's queue - // 5. grab the next HoQ Envelope and flow's deficit - for len(s.buffer[chID]) > 0 && d >= we.size { - s.metrics.PeerSendBytesTotal.With( - "chID", fmt.Sprint(chID), - "peer_id", string(we.envelope.To)).Add(float64(we.size)) - s.dequeueCh <- we.envelope - s.size -= we.size - s.deficits[chID] -= we.size - s.buffer[chID] = s.buffer[chID][1:] - - if len(s.buffer[chID]) > 0 { - d = s.deficits[chID] - we = s.buffer[chID][0] - } - } - } - - // reset the flow's deficit to zero if it is empty - if len(s.buffer[chID]) == 0 { - s.deficits[chID] = 0 - } - } - } - } -} diff --git a/internal/p2p/wdrr_queue_test.go b/internal/p2p/wdrr_queue_test.go deleted file mode 100644 index d49c77e76..000000000 --- a/internal/p2p/wdrr_queue_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package p2p - -import ( - "math" - "math/rand" - "testing" - "time" - - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -type testMessage = gogotypes.StringValue - -func TestWDRRQueue_EqualWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, - {ID: 0x02, Priority: 1, MaxSendBytes: 4}, - {ID: 0x03, Priority: 1, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 1000, 1000, 120) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - closer := tmsync.NewCloser() - - go func() { - timout := 10 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - maxMsgs := 5000 - minMsgs := 1000 - - for _, chDesc := range chDescs { - total := rng.Intn(maxMsgs-minMsgs) + minMsgs // total = rand[minMsgs, maxMsgs) - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - // wait for dequeueing to complete - <-closer.Done() 
- - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - var ( - sum float64 - stdDev float64 - ) - - for _, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - sum += successRate - successRates[chID] = successRate - - // require some messages dropped - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - numFlows := float64(len(peerQueue.buffer)) - mean := sum / numFlows - - for _, successRate := range successRates { - stdDev += math.Pow(successRate-mean, 2) - } - - stdDev = math.Sqrt(stdDev / numFlows) - require.Less(t, stdDev, 0.02, "expected success rate standard deviation to be less than 2%") -} - -func TestWDRRQueue_DecreasingWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 18, MaxSendBytes: 4}, - {ID: 0x02, Priority: 10, MaxSendBytes: 4}, - {ID: 0x03, Priority: 2, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 0, 0, 500) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - for _, chDesc := range chDescs { - total := 1000 - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - closer := tmsync.NewCloser() - - go func() { - timout := 20 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - for i, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - successRates[chID] = successRate - - // Require some messages dropped. Note, the top weighted flows may not have - // any dropped if lower priority non-empty queues always exist. 
- if i > 2 { - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - // require channel 0x01 to have the highest success rate due to its weight - ch01Rate := successRates[ChannelID(chDescs[0].ID)] - for i := 1; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch01Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x02 to have the 2nd highest success rate due to its weight - ch02Rate := successRates[ChannelID(chDescs[1].ID)] - for i := 2; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch02Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x03 to have the 3rd highest success rate due to its weight - ch03Rate := successRates[ChannelID(chDescs[2].ID)] - for i := 3; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch03Rate, successRates[ChannelID(chDescs[i].ID)]) - } -} diff --git a/proxy/app_conn.go b/internal/proxy/app_conn.go similarity index 58% rename from proxy/app_conn.go rename to internal/proxy/app_conn.go index 8eb90daf3..803875646 100644 --- a/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -2,36 +2,38 @@ package proxy import ( "context" + "time" - abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/go-kit/kit/metrics" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" ) -//go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot +//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level type AppConnConsensus interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error) + DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) CommitSync(context.Context) (*types.ResponseCommit, error) } type AppConnMempool interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error - CheckTxAsync(context.Context, types.RequestCheckTx) (*abcicli.ReqRes, error) + CheckTxAsync(context.Context, types.RequestCheckTx) (*abciclient.ReqRes, error) CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - FlushAsync(context.Context) (*abcicli.ReqRes, error) + FlushAsync(context.Context) (*abciclient.ReqRes, error) FlushSync(context.Context) error } @@ -53,19 +55,21 @@ type AppConnSnapshot interface { } //----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abcicli.Client) +// Implements AppConnConsensus (subset of abciclient.Client) type appConnConsensus struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnConsensus(appConn abcicli.Client) AppConnConsensus { +func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) 
AppConnConsensus { return &appConnConsensus{ + metrics: metrics, appConn: appConn, } } -func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnConsensus) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -77,6 +81,7 @@ func (app *appConnConsensus) InitChainSync( ctx context.Context, req types.RequestInitChain, ) (*types.ResponseInitChain, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() return app.appConn.InitChainSync(ctx, req) } @@ -84,10 +89,15 @@ func (app *appConnConsensus) BeginBlockSync( ctx context.Context, req types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))() return app.appConn.BeginBlockSync(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (app *appConnConsensus) DeliverTxAsync( + ctx context.Context, + req types.RequestDeliverTx, +) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "async"))() return app.appConn.DeliverTxAsync(ctx, req) } @@ -95,27 +105,31 @@ func (app *appConnConsensus) EndBlockSync( ctx context.Context, req types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "end_block", "type", "sync"))() return app.appConn.EndBlockSync(ctx, req) } func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() return app.appConn.CommitSync(ctx) } //------------------------------------------------ -// Implements AppConnMempool (subset of abcicli.Client) +// Implements AppConnMempool (subset of abciclient.Client) type appConnMempool struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnMempool(appConn abcicli.Client) AppConnMempool { +func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool { return &appConnMempool{ + metrics: metrics, appConn: appConn, } } -func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnMempool) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -123,31 +137,37 @@ func (app *appConnMempool) Error() error { return app.appConn.Error() } -func (app *appConnMempool) FlushAsync(ctx context.Context) (*abcicli.ReqRes, error) { +func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "async"))() return app.appConn.FlushAsync(ctx) } func (app *appConnMempool) FlushSync(ctx context.Context) error { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() return app.appConn.FlushSync(ctx) } -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "async"))() return app.appConn.CheckTxAsync(ctx, req) } func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + defer
addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() return app.appConn.CheckTxSync(ctx, req) } //------------------------------------------------ -// Implements AppConnQuery (subset of abcicli.Client) +// Implements AppConnQuery (subset of abciclient.Client) type appConnQuery struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnQuery(appConn abcicli.Client) AppConnQuery { +func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery { return &appConnQuery{ + metrics: metrics, appConn: appConn, } } @@ -157,26 +177,31 @@ func (app *appConnQuery) Error() error { } func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() return app.appConn.EchoSync(ctx, msg) } func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() return app.appConn.InfoSync(ctx, req) } func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() return app.appConn.QuerySync(ctx, reqQuery) } //------------------------------------------------ -// Implements AppConnSnapshot (subset of abcicli.Client) +// Implements AppConnSnapshot (subset of abciclient.Client) type appConnSnapshot struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnSnapshot(appConn abcicli.Client) AppConnSnapshot { +func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot { return &appConnSnapshot{ + metrics: metrics, appConn: appConn, } } @@ -189,6 +214,7 @@ func (app *appConnSnapshot) ListSnapshotsSync( ctx context.Context, req types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() return app.appConn.ListSnapshotsSync(ctx, req) } @@ -196,17 +222,29 @@ func (app *appConnSnapshot) OfferSnapshotSync( ctx context.Context, req types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() return app.appConn.OfferSnapshotSync(ctx, req) } func (app *appConnSnapshot) LoadSnapshotChunkSync( ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() return app.appConn.LoadSnapshotChunkSync(ctx, req) } func (app *appConnSnapshot) ApplySnapshotChunkSync( ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() return app.appConn.ApplySnapshotChunkSync(ctx, req) } + +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete.
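That comment documents the timing helper whose definition follows; every instrumented method wraps it in the defer addTimeSample(...)() form seen throughout these hunks. A standalone sketch of the same defer-a-closure trick, reusing go-kit's metrics types as the diff does (the helper is renamed here to make clear it is an illustration, not the code below):

package main

import (
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
)

// timeSample mirrors the helper: it captures the start time now and
// returns a closure that records the elapsed seconds when invoked.
func timeSample(m metrics.Histogram) func() {
	start := time.Now()
	return func() { m.Observe(time.Since(start).Seconds()) }
}

func slowOperation(h metrics.Histogram) {
	// Note the trailing (): the returned closure must be the deferred call,
	// otherwise only the helper itself would be deferred and nothing would
	// be observed at function exit.
	defer timeSample(h)()
	time.Sleep(10 * time.Millisecond)
}

func main() {
	slowOperation(discard.NewHistogram()) // discard histogram drops samples; fine for a demo
}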
+func addTimeSample(m metrics.Histogram) func() { + start := time.Now() + return func() { m.Observe(time.Since(start).Seconds()) } +} diff --git a/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go similarity index 66% rename from proxy/app_conn_test.go rename to internal/proxy/app_conn_test.go index 458088635..5eb810bd6 100644 --- a/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -17,20 +17,20 @@ import ( //---------------------------------------- type appConnTestI interface { - EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) + EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) FlushSync(context.Context) error InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { - appConn abcicli.Client + appConn abciclient.Client } -func newAppConnTest(appConn abcicli.Client) appConnTestI { +func newAppConnTest(appConn abciclient.Client) appConnTestI { return &appConnTest{appConn} } -func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { return app.appConn.EchoAsync(ctx, msg) } @@ -48,34 +48,32 @@ var SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + if err := s.Start(ctx); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { + + if err := cli.Start(ctx); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } proxy := newAppConnTest(cli) t.Log("Connected") - ctx := context.Background() for i := 0; i < 1000; i++ { _, err = proxy.EchoAsync(ctx, fmt.Sprintf("echo-%v", i)) if err != nil { @@ -96,27 +94,26 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - 
s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + if err := s.Start(ctx); err != nil { b.Fatalf("Error starting socket server: %v", err.Error()) } - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) + b.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { b.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { + + if err := cli.Start(ctx); err != nil { b.Fatalf("Error starting ABCI client: %v", err.Error()) } @@ -125,7 +122,6 @@ func BenchmarkEcho(b *testing.B) { echoString := strings.Repeat(" ", 200) b.StartTimer() // Start benchmarking tests - ctx := context.Background() for i := 0; i < b.N; i++ { _, err = proxy.EchoAsync(ctx, echoString) if err != nil { @@ -149,27 +145,26 @@ func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + if err := s.Start(ctx); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { + + if err := cli.Start(ctx); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } diff --git a/internal/proxy/client.go b/internal/proxy/client.go new file mode 100644 index 000000000..4e034802e --- /dev/null +++ b/internal/proxy/client.go @@ -0,0 +1,42 @@ +package proxy + +import ( + "io" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/app" +) + +// DefaultClientCreator returns a default ClientCreator, which will create a +// local client if addr is one of: 'kvstore', +// 'persistent_kvstore', 'e2e', or 'noop', and a remote client otherwise. +// +// The Closer is a noop except for persistent_kvstore applications, +// which will clean up the store.
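The factory documented above dispatches purely on the address string; its definition follows. A sketch of how a caller might wire it together with the connection manager introduced later in this diff (the paths and logger are placeholders, and this assumes code living inside the tendermint module, since internal packages are not importable from outside it):

package main

import (
	"context"

	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	logger := log.TestingLogger() // placeholder logger

	// "kvstore" selects the in-process example app; any other address is
	// treated as a remote ABCI endpoint reached over the given transport.
	creator, closer := proxy.DefaultClientCreator(logger, "kvstore", "socket", "/tmp/data")
	defer closer.Close() // no-op except for persistent_kvstore

	conns := proxy.NewAppConns(creator, logger, proxy.NopMetrics())
	if err := conns.Start(context.Background()); err != nil {
		panic(err)
	}
}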
+func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) { + switch addr { + case "kvstore": + return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} + case "persistent_kvstore": + app := kvstore.NewPersistentKVStoreApplication(dbDir) + return abciclient.NewLocalCreator(app), app + case "e2e": + app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) + if err != nil { + panic(err) + } + return abciclient.NewLocalCreator(app), noopCloser{} + case "noop": + return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} + default: + mustConnect := false // loop retrying + return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{} + } +} + +type noopCloser struct{} + +func (noopCloser) Close() error { return nil } diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go new file mode 100644 index 000000000..99bd7d7b0 --- /dev/null +++ b/internal/proxy/metrics.go @@ -0,0 +1,47 @@ +package proxy + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "abci_connection" +) + +// Metrics contains the prometheus metrics exposed by the proxy package. +type Metrics struct { + MethodTiming metrics.Histogram +} + +// PrometheusMetrics constructs a Metrics instance that collects metrics samples. +// The resulting metrics will be prefixed with namespace and labeled with the +// defaultLabelsAndValues. defaultLabelsAndValues must be a list of string pairs +// where the first of each pair is the label and the second is the value. +func PrometheusMetrics(namespace string, defaultLabelsAndValues ...string) *Metrics { + defaultLabels := []string{} + for i := 0; i < len(defaultLabelsAndValues); i += 2 { + defaultLabels = append(defaultLabels, defaultLabelsAndValues[i]) + } + return &Metrics{ + MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "method_timing", + Help: "ABCI Method Timing", + Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, + }, append(defaultLabels, []string{"method", "type"}...)).With(defaultLabelsAndValues...), + } +} + +// NopMetrics constructs a Metrics instance that discards all samples and is suitable +// for testing. 
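PrometheusMetrics above treats defaultLabelsAndValues as alternating label/value pairs, taking the even-indexed entries as label names. A tiny sketch of that pairing convention in isolation (splitPairs is a hypothetical helper, not part of the diff):

package main

import "fmt"

// splitPairs mimics how PrometheusMetrics walks defaultLabelsAndValues:
// even indexes are label names, odd indexes are the matching values.
func splitPairs(labelsAndValues ...string) (labels, values []string) {
	for i := 0; i+1 < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
		values = append(values, labelsAndValues[i+1])
	}
	return labels, values
}

func main() {
	labels, values := splitPairs("chain_id", "test-chain", "version", "v0.35")
	fmt.Println(labels, values) // [chain_id version] [test-chain v0.35]
}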
+func NopMetrics() *Metrics { + return &Metrics{ + MethodTiming: discard.NewHistogram(), + } +} diff --git a/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go similarity index 92% rename from proxy/mocks/app_conn_consensus.go rename to internal/proxy/mocks/app_conn_consensus.go index 03207706e..fa93b0931 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -64,15 +64,15 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -147,6 +147,6 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go similarity index 83% rename from proxy/mocks/app_conn_mempool.go rename to internal/proxy/mocks/app_conn_mempool.go index 2505160d6..5429d8f90 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/internal/proxy/mocks/app_conn_mempool.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -18,15 +18,15 @@ type AppConnMempool struct { } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -78,15 +78,15 @@ func (_m *AppConnMempool) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok 
:= ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -115,6 +115,6 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go similarity index 100% rename from proxy/mocks/app_conn_query.go rename to internal/proxy/mocks/app_conn_query.go diff --git a/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go similarity index 100% rename from proxy/mocks/app_conn_snapshot.go rename to internal/proxy/mocks/app_conn_snapshot.go diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go new file mode 100644 index 000000000..6196992ff --- /dev/null +++ b/internal/proxy/multi_app_conn.go @@ -0,0 +1,241 @@ +package proxy + +import ( + "context" + "errors" + "fmt" + "os" + "syscall" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" +) + +const ( + connConsensus = "consensus" + connMempool = "mempool" + connQuery = "query" + connSnapshot = "snapshot" +) + +// AppConns is Tendermint's interface to the application; it consists of +// multiple connections. +type AppConns interface { + service.Service + + // Mempool connection + Mempool() AppConnMempool + // Consensus connection + Consensus() AppConnConsensus + // Query connection + Query() AppConnQuery + // Snapshot connection + Snapshot() AppConnSnapshot +} + +// NewAppConns calls NewMultiAppConn. +func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { + return NewMultiAppConn(clientCreator, logger, metrics) +} + +// multiAppConn implements AppConns. +// +// A multiAppConn is made of a few appConns and manages their underlying abci +// clients. +// TODO: on app restart, clients must reboot together +type multiAppConn struct { + service.BaseService + + metrics *Metrics + consensusConn AppConnConsensus + mempoolConn AppConnMempool + queryConn AppConnQuery + snapshotConn AppConnSnapshot + + consensusConnClient stoppableClient + mempoolConnClient stoppableClient + queryConnClient stoppableClient + snapshotConnClient stoppableClient + + clientCreator abciclient.Creator +} + +// TODO: this is a totally internal and quasi-permanent shim for +// clients. Eventually we can have a single client and some kind +// of reasonable lifecycle without needing an explicit stop method. +type stoppableClient interface { + abciclient.Client + Stop() error +} + +// NewMultiAppConn makes all necessary abci connections to the application.
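The stoppableClient shim above is consumed via type assertion: OnStart, defined below, does c.(stoppableClient) on whatever the creator returns, so every client is expected to expose Stop during this transition. A minimal sketch of that optional-interface pattern (names are illustrative):

package main

import "fmt"

type client interface{ Wait() }

// stoppable is the optional extension: only some clients expose Stop.
type stoppable interface {
	client
	Stop() error
}

type socketClient struct{}

func (socketClient) Wait()       {}
func (socketClient) Stop() error { fmt.Println("stopped"); return nil }

func main() {
	var c client = socketClient{}
	// Assert the extension at the boundary, as multiAppConn does with its
	// stoppableClient interface. multiAppConn uses the single-value form,
	// which panics on a mismatch; the two-value form shown here degrades
	// gracefully instead.
	if s, ok := c.(stoppable); ok {
		_ = s.Stop()
	}
}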
+func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { + multiAppConn := &multiAppConn{ + metrics: metrics, + clientCreator: clientCreator, + } + multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn) + return multiAppConn +} + +func (app *multiAppConn) Mempool() AppConnMempool { + return app.mempoolConn +} + +func (app *multiAppConn) Consensus() AppConnConsensus { + return app.consensusConn +} + +func (app *multiAppConn) Query() AppConnQuery { + return app.queryConn +} + +func (app *multiAppConn) Snapshot() AppConnSnapshot { + return app.snapshotConn +} + +func (app *multiAppConn) OnStart(ctx context.Context) error { + c, err := app.abciClientFor(ctx, connQuery) + if err != nil { + return err + } + app.queryConnClient = c.(stoppableClient) + app.queryConn = NewAppConnQuery(c, app.metrics) + + c, err = app.abciClientFor(ctx, connSnapshot) + if err != nil { + app.stopAllClients() + return err + } + app.snapshotConnClient = c.(stoppableClient) + app.snapshotConn = NewAppConnSnapshot(c, app.metrics) + + c, err = app.abciClientFor(ctx, connMempool) + if err != nil { + app.stopAllClients() + return err + } + app.mempoolConnClient = c.(stoppableClient) + app.mempoolConn = NewAppConnMempool(c, app.metrics) + + c, err = app.abciClientFor(ctx, connConsensus) + if err != nil { + app.stopAllClients() + return err + } + app.consensusConnClient = c.(stoppableClient) + app.consensusConn = NewAppConnConsensus(c, app.metrics) + + // Kill Tendermint if the ABCI application crashes. + app.startWatchersForClientErrorToKillTendermint(ctx) + + return nil +} + +func (app *multiAppConn) OnStop() { + app.stopAllClients() +} + +func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context.Context) { + // this function starts a number of threads (per abci client) + // that will SIGTERM our own PID if any of the ABCI clients + // exit/return early. If the context is canceled then these + // functions will not kill tendermint. + + killFn := func(conn string, err error, logger log.Logger) { + logger.Error( + fmt.Sprintf("%s connection terminated. Did the application crash?
Please restart tendermint", conn), + "err", err) + if killErr := kill(); killErr != nil { + logger.Error("Failed to kill this process - please do so manually", "err", killErr) + } + } + + type op struct { + connClient stoppableClient + name string + } + + for _, client := range []op{ + { + connClient: app.consensusConnClient, + name: connConsensus, + }, + { + connClient: app.mempoolConnClient, + name: connMempool, + }, + { + connClient: app.queryConnClient, + name: connQuery, + }, + { + connClient: app.snapshotConnClient, + name: connSnapshot, + }, + } { + go func(name string, client stoppableClient) { + client.Wait() + if ctx.Err() != nil { + return + } + if err := client.Error(); err != nil { + killFn(name, err, app.Logger) + } + }(client.name, client.connClient) + } +} + +func (app *multiAppConn) stopAllClients() { + if app.consensusConnClient != nil { + if err := app.consensusConnClient.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping consensus client", "error", err) + } + } + } + if app.mempoolConnClient != nil { + if err := app.mempoolConnClient.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping mempool client", "error", err) + } + } + } + if app.queryConnClient != nil { + if err := app.queryConnClient.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping query client", "error", err) + } + } + } + if app.snapshotConnClient != nil { + if err := app.snapshotConnClient.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping snapshot client", "error", err) + } + } + } +} + +func (app *multiAppConn) abciClientFor(ctx context.Context, conn string) (abciclient.Client, error) { + c, err := app.clientCreator(app.Logger.With( + "module", "abci-client", + "connection", conn)) + if err != nil { + return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) + } + if err := c.Start(ctx); err != nil { + return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) + } + return c, nil +} + +func kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + + return p.Signal(syscall.SIGTERM) +} diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go new file mode 100644 index 000000000..98ea0ca53 --- /dev/null +++ b/internal/proxy/multi_app_conn_test.go @@ -0,0 +1,98 @@ +package proxy + +import ( + "context" + "errors" + "os" + "os/signal" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abciclient "github.com/tendermint/tendermint/abci/client" + abcimocks "github.com/tendermint/tendermint/abci/client/mocks" + "github.com/tendermint/tendermint/libs/log" +) + +type noopStoppableClientImpl struct { + abciclient.Client + count int +} + +func (c *noopStoppableClientImpl) Stop() error { c.count++; return nil } + +func TestAppConns_Start_Stop(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientMock := &abcimocks.Client{} + clientMock.On("Start", mock.Anything).Return(nil).Times(4) + clientMock.On("Error").Return(nil) + clientMock.On("Wait").Return(nil).Times(4) + cl := &noopStoppableClientImpl{Client: clientMock} + + creatorCallCount := 0 + creator := func(logger log.Logger) (abciclient.Client, error) { + 
creatorCallCount++
+		return cl, nil
+	}
+
+	appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics())
+
+	err := appConns.Start(ctx)
+	require.NoError(t, err)
+
+	time.Sleep(100 * time.Millisecond)
+
+	cancel()
+	appConns.Wait()
+
+	clientMock.AssertExpectations(t)
+	assert.Equal(t, 4, cl.count)
+	assert.Equal(t, 4, creatorCallCount)
+}
+
+// Upon failure, the process should be sent SIGTERM (via the kill helper above).
+func TestAppConns_Failure(t *testing.T) {
+	ok := make(chan struct{})
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGTERM)
+	go func() {
+		for range c {
+			close(ok)
+			return
+		}
+	}()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	clientMock := &abcimocks.Client{}
+	clientMock.On("SetLogger", mock.Anything).Return()
+	clientMock.On("Start", mock.Anything).Return(nil)
+
+	clientMock.On("Wait").Return(nil)
+	clientMock.On("Error").Return(errors.New("EOF"))
+	cl := &noopStoppableClientImpl{Client: clientMock}
+
+	creator := func(log.Logger) (abciclient.Client, error) {
+		return cl, nil
+	}
+
+	appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics())
+
+	err := appConns.Start(ctx)
+	require.NoError(t, err)
+	t.Cleanup(func() { cancel(); appConns.Wait() })
+
+	select {
+	case <-ok:
+		t.Log("SIGTERM successfully received")
+	case <-time.After(5 * time.Second):
+		t.Fatal("expected process to receive SIGTERM signal")
+	}
+}
diff --git a/proxy/version.go b/internal/proxy/version.go
similarity index 100%
rename from proxy/version.go
rename to internal/proxy/version.go
diff --git a/rpc/core/CONTRIBUTING.md b/internal/rpc/core/CONTRIBUTING.md
similarity index 100%
rename from rpc/core/CONTRIBUTING.md
rename to internal/rpc/core/CONTRIBUTING.md
diff --git a/rpc/core/README.md b/internal/rpc/core/README.md
similarity index 100%
rename from rpc/core/README.md
rename to internal/rpc/core/README.md
diff --git a/rpc/core/abci.go b/internal/rpc/core/abci.go
similarity index 70%
rename from rpc/core/abci.go
rename to internal/rpc/core/abci.go
index ce705ba90..06c033050 100644
--- a/rpc/core/abci.go
+++ b/internal/rpc/core/abci.go
@@ -2,9 +2,9 @@ package core
 
 import (
 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/proxy"
 	"github.com/tendermint/tendermint/libs/bytes"
-	"github.com/tendermint/tendermint/proxy"
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
@@ -16,7 +16,7 @@ func (env *Environment) ABCIQuery(
 	data bytes.HexBytes,
 	height int64,
 	prove bool,
-) (*ctypes.ResultABCIQuery, error) {
+) (*coretypes.ResultABCIQuery, error) {
 	resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{
 		Path:   path,
 		Data:   data,
@@ -27,16 +27,16 @@ func (env *Environment) ABCIQuery(
 		return nil, err
 	}
 
-	return &ctypes.ResultABCIQuery{Response: *resQuery}, nil
+	return &coretypes.ResultABCIQuery{Response: *resQuery}, nil
 }
 
 // ABCIInfo gets some info about the application.
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo) if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{Response: *resInfo}, nil + return &coretypes.ResultABCIInfo{Response: *resInfo}, nil } diff --git a/rpc/core/blocks.go b/internal/rpc/core/blocks.go similarity index 84% rename from rpc/core/blocks.go rename to internal/rpc/core/blocks.go index 78b567583..26472fab4 100644 --- a/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -4,12 +4,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -25,7 +25,7 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( ctx *rpctypes.Context, - minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -49,7 +49,7 @@ func (env *Environment) BlockchainInfo( } } - return &ctypes.ResultBlockchainInfo{ + return &coretypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), BlockMetas: blockMetas}, nil } @@ -60,7 +60,7 @@ func (env *Environment) BlockchainInfo( func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // filter negatives if min < 0 || max < 0 { - return min, max, ctypes.ErrZeroOrNegativeHeight + return min, max, coretypes.ErrZeroOrNegativeHeight } // adjust for default values @@ -83,7 +83,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { if min > max { return min, max, fmt.Errorf("%w: min height %d can't be greater than max height %d", - ctypes.ErrInvalidRequest, min, max) + coretypes.ErrInvalidRequest, min, max) } return min, max, nil } @@ -91,7 +91,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -99,33 +99,33 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } block := env.BlockStore.LoadBlock(height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // BlockByHash gets block by hash. 
// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) { +func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. block := env.BlockStore.LoadBlockByHash(hash) if block == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } // If block is not nil, then blockMeta can't be nil. blockMeta := env.BlockStore.LoadBlockMeta(block.Height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { +func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -144,7 +144,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // NOTE: we can't yet ensure atomicity of operations in asserting // whether this is the latest height and retrieving the seen commit if commit != nil && commit.Height == height { - return ctypes.NewResultCommit(&header, commit, false), nil + return coretypes.NewResultCommit(&header, commit, false), nil } } @@ -153,7 +153,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes if commit == nil { return nil, nil } - return ctypes.NewResultCommit(&header, commit, true), nil + return coretypes.NewResultCommit(&header, commit, true), nil } // BlockResults gets ABCIResults at a given height. 
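
Nearly all of the churn in these rpc/core hunks is one mechanical rename: the ctypes alias for rpc/core/types gives way to a direct import of the relocated rpc/coretypes package. A minimal before/after sketch of what any consumer changes (illustrative only; the hunks themselves are the authoritative edits):

	package example

	import "github.com/tendermint/tendermint/rpc/coretypes"

	// Previously the same type was reached through an alias:
	//     ctypes "github.com/tendermint/tendermint/rpc/core/types"
	//     var latest *ctypes.ResultBlock
	var latest *coretypes.ResultBlock
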
@@ -163,7 +163,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
 // Thus response.results.deliver_tx[5] is the result of executing
 // getBlock(h).Txs[5]
 // More: https://docs.tendermint.com/master/rpc/#/Info/block_results
-func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
+func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) {
 	height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
 	if err != nil {
 		return nil, err
@@ -179,7 +179,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*
 		totalGasUsed += tx.GetGasUsed()
 	}
 
-	return &ctypes.ResultBlockResults{
+	return &coretypes.ResultBlockResults{
 		Height:                height,
 		TxsResults:            results.DeliverTxs,
 		TotalGasUsed:          totalGasUsed,
@@ -197,7 +197,7 @@ func (env *Environment) BlockSearch(
 	query string,
 	pagePtr, perPagePtr *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
+) (*coretypes.ResultBlockSearch, error) {
 
 	if !indexer.KVSinkEnabled(env.EventSinks) {
 		return nil, fmt.Errorf("block searching is disabled due to no kvEventSink")
@@ -229,7 +229,7 @@ func (env *Environment) BlockSearch(
 		sort.Slice(results, func(i, j int) bool { return results[i] < results[j] })
 
 	default:
-		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest)
+		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest)
 	}
 
 	// paginate results
@@ -244,13 +244,13 @@ func (env *Environment) BlockSearch(
 	skipCount := validateSkipCount(page, perPage)
 	pageSize := tmmath.MinInt(perPage, totalCount-skipCount)
 
-	apiResults := make([]*ctypes.ResultBlock, 0, pageSize)
+	apiResults := make([]*coretypes.ResultBlock, 0, pageSize)
 	for i := skipCount; i < skipCount+pageSize; i++ {
 		block := env.BlockStore.LoadBlock(results[i])
 		if block != nil {
 			blockMeta := env.BlockStore.LoadBlockMeta(block.Height)
 			if blockMeta != nil {
-				apiResults = append(apiResults, &ctypes.ResultBlock{
+				apiResults = append(apiResults, &coretypes.ResultBlock{
 					Block:   block,
 					BlockID: blockMeta.BlockID,
 				})
@@ -258,5 +258,5 @@ func (env *Environment) BlockSearch(
 		}
 	}
 
-	return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
+	return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
 }
diff --git a/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go
similarity index 95%
rename from rpc/core/blocks_test.go
rename to internal/rpc/core/blocks_test.go
index 29db2f094..68237bc0b 100644
--- a/rpc/core/blocks_test.go
+++ b/internal/rpc/core/blocks_test.go
@@ -10,10 +10,10 @@ import (
 	dbm "github.com/tendermint/tm-db"
 
 	abci "github.com/tendermint/tendermint/abci/types"
+	sm "github.com/tendermint/tendermint/internal/state"
 	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
-	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -89,12 +89,12 @@ func TestBlockResults(t *testing.T) {
 	testCases := []struct {
 		height  int64
 		wantErr bool
-		wantRes *ctypes.ResultBlockResults
+		wantRes *coretypes.ResultBlockResults
 	}{
 		{-1, true, nil},
 		{0, true, nil},
 		{101, true, nil},
-		{100, false, &ctypes.ResultBlockResults{
+		{100, false, &coretypes.ResultBlockResults{
Height: 100, TxsResults: results.DeliverTxs, TotalGasUsed: 15, diff --git a/rpc/core/consensus.go b/internal/rpc/core/consensus.go similarity index 61% rename from rpc/core/consensus.go rename to internal/rpc/core/consensus.go index b067e1063..d17796fff 100644 --- a/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,13 +1,9 @@ package core import ( - "errors" - - cm "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. @@ -20,7 +16,7 @@ import ( func (env *Environment) Validators( ctx *rpctypes.Context, heightPtr *int64, - pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) @@ -44,7 +40,7 @@ func (env *Environment) Validators( v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &coretypes.ResultValidators{ BlockHeight: height, Validators: v, Count: len(v), @@ -54,56 +50,32 @@ func (env *Environment) Validators( // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { // Get Peer consensus states. - var peerStates []ctypes.PeerStateInfo - switch { - case env.P2PPeers != nil: - peers := env.P2PPeers.Peers().List() - peerStates = make([]ctypes.PeerStateInfo, 0, len(peers)) - for _, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - peerStates = append(peerStates, ctypes.PeerStateInfo{ + var peerStates []coretypes.PeerStateInfo + peers := env.PeerManager.Peers() + peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { + continue + } + + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + + addr := env.PeerManager.Addresses(pid) + if len(addr) != 0 { + peerStates = append(peerStates, coretypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.SocketAddr().String(), + NodeAddress: addr[0].String(), // Peer consensus state. PeerState: peerStateJSON, }) } - case env.PeerManager != nil: - peers := env.PeerManager.Peers() - peerStates = make([]ctypes.PeerStateInfo, 0, len(peers)) - for _, pid := range peers { - peerState, ok := env.ConsensusReactor.GetPeerState(pid) - if !ok { - continue - } - - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - - addr := env.PeerManager.Addresses(pid) - if len(addr) >= 1 { - peerStates = append(peerStates, ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: addr[0].String(), - // Peer consensus state. 
- PeerState: peerStateJSON, - }) - } - } - default: - return nil, errors.New("no peer system configured") } // Get self round state. @@ -111,7 +83,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul if err != nil { return nil, err } - return &ctypes.ResultDumpConsensusState{ + return &coretypes.ResultDumpConsensusState{ RoundState: roundState, Peers: peerStates}, nil } @@ -119,10 +91,10 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul // ConsensusState returns a concise summary of the consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() - return &ctypes.ResultConsensusState{RoundState: bz}, err + return &coretypes.ResultConsensusState{RoundState: bz}, err } // ConsensusParams gets the consensus parameters at the given block height. @@ -130,7 +102,7 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params func (env *Environment) ConsensusParams( ctx *rpctypes.Context, - heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + heightPtr *int64) (*coretypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. @@ -144,7 +116,7 @@ func (env *Environment) ConsensusParams( return nil, err } - return &ctypes.ResultConsensusParams{ + return &coretypes.ResultConsensusParams{ BlockHeight: height, ConsensusParams: consensusParams}, nil } diff --git a/rpc/core/dev.go b/internal/rpc/core/dev.go similarity index 61% rename from rpc/core/dev.go rename to internal/rpc/core/dev.go index 0e365cdcc..21c5154ff 100644 --- a/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -1,12 +1,12 @@ package core import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // UnsafeFlushMempool removes all transactions from the mempool. 
-func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() - return &ctypes.ResultUnsafeFlushMempool{}, nil + return &coretypes.ResultUnsafeFlushMempool{}, nil } diff --git a/rpc/core/doc.go b/internal/rpc/core/doc.go similarity index 100% rename from rpc/core/doc.go rename to internal/rpc/core/doc.go diff --git a/rpc/core/doc_template.txt b/internal/rpc/core/doc_template.txt similarity index 100% rename from rpc/core/doc_template.txt rename to internal/rpc/core/doc_template.txt diff --git a/rpc/core/env.go b/internal/rpc/core/env.go similarity index 79% rename from rpc/core/env.go rename to internal/rpc/core/env.go index 7069bc4d4..9adeeee71 100644 --- a/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -5,17 +5,19 @@ import ( "fmt" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/consensus" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/statesync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -50,14 +52,6 @@ type transport interface { NodeInfo() types.NodeInfo } -type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet -} - type consensusReactor interface { WaitSync() bool GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) @@ -82,7 +76,6 @@ type Environment struct { EvidencePool sm.EvidencePool ConsensusState consensusState ConsensusReactor consensusReactor - P2PPeers peers // Legacy p2p stack P2PTransport transport @@ -91,16 +84,17 @@ type Environment struct { PeerManager peerManager // objects - PubKey crypto.PubKey - GenDoc *types.GenesisDoc // cache the genesis structure - EventSinks []indexer.EventSink - EventBus *types.EventBus // thread safe - Mempool mempl.Mempool - BlockSyncReactor consensus.BlockSyncReactor + PubKey crypto.PubKey + GenDoc *types.GenesisDoc // cache the genesis structure + EventSinks []indexer.EventSink + EventBus *eventbus.EventBus // thread safe + Mempool mempool.Mempool + BlockSyncReactor consensus.BlockSyncReactor + StateSyncMetricer statesync.Metricer Logger log.Logger - Config cfg.RPCConfig + Config config.RPCConfig // cache of chunked genesis data. 
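 // Served one chunk at a time by the GenesisChunked RPC.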
genChunks []string @@ -111,7 +105,7 @@ type Environment struct { func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { // this can only happen if we haven't first run validatePerPage if perPage < 1 { - panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage)) + panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) } if pagePtr == nil { // no page parameter @@ -124,7 +118,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { } page := *pagePtr if page <= 0 || page > pages { - return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page) + return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page) } return page, nil @@ -189,15 +183,15 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, if heightPtr != nil { height := *heightPtr if height <= 0 { - return 0, fmt.Errorf("%w (requested height: %d)", ctypes.ErrZeroOrNegativeHeight, height) + return 0, fmt.Errorf("%w (requested height: %d)", coretypes.ErrZeroOrNegativeHeight, height) } if height > latestHeight { return 0, fmt.Errorf("%w (requested height: %d, blockchain height: %d)", - ctypes.ErrHeightExceedsChainHead, height, latestHeight) + coretypes.ErrHeightExceedsChainHead, height, latestHeight) } base := env.BlockStore.Base() if height < base { - return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, height, base) + return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, height, base) } return height, nil } diff --git a/rpc/core/env_test.go b/internal/rpc/core/env_test.go similarity index 100% rename from rpc/core/env_test.go rename to internal/rpc/core/env_test.go diff --git a/rpc/core/events.go b/internal/rpc/core/events.go similarity index 58% rename from rpc/core/events.go rename to internal/rpc/core/events.go index e56295c52..d8e09f35e 100644 --- a/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -2,29 +2,36 @@ package core import ( "context" + "errors" "fmt" "time" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( // Buffer on the Tendermint (server) side to allow some slowness in clients. subBufferSize = 100 + + // maxQueryLength is the maximum length of a query string that will be + // accepted. This is just a safety check to avoid outlandish queries. + maxQueryLength = 512 ) // Subscribe for events via WebSocket. 
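 // New subscriptions are refused once max_subscription_clients or
 // max_subscriptions_per_client is reached, or when the query string is
 // longer than maxQueryLength.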
// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe -func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { +func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } env.Logger.Info("Subscribe to query", "remote", addr, "query", query) @@ -37,7 +44,11 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() - sub, err := env.EventBus.Subscribe(subCtx, addr, q, subBufferSize) + sub, err := env.EventBus.SubscribeWithArgs(subCtx, tmpubsub.SubscribeArgs{ + ClientID: addr, + Query: q, + Limit: subBufferSize, + }) if err != nil { return nil, err } @@ -46,46 +57,43 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. subscriptionID := ctx.JSONReq.ID go func() { for { - select { - case msg := <-sub.Out(): - var ( - resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} - resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) - ) - writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { - env.Logger.Info("Can't write response (slow client)", + msg, err := sub.Next(context.Background()) + if errors.Is(err, tmpubsub.ErrUnsubscribed) { + // The subscription was removed by the client. + return + } else if errors.Is(err, tmpubsub.ErrTerminated) { + // The subscription was terminated by the publisher. + resp := rpctypes.RPCServerError(subscriptionID, err) + ok := ctx.WSConn.TryWriteRPCResponse(resp) + if !ok { + env.Logger.Info("Unable to write response (slow client)", "to", addr, "subscriptionID", subscriptionID, "err", err) } - case <-sub.Canceled(): - if sub.Err() != tmpubsub.ErrUnsubscribed { - var reason string - if sub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = sub.Err().Error() - } - var ( - err = fmt.Errorf("subscription was canceled (reason: %s)", reason) - resp = rpctypes.RPCServerError(subscriptionID, err) - ) - if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok { - env.Logger.Info("Can't write response (slow client)", - "to", addr, "subscriptionID", subscriptionID, "err", err) - } - } return } + + // We have a message to deliver to the client. + resp := rpctypes.NewRPCSuccessResponse(subscriptionID, &coretypes.ResultEvent{ + Query: query, + Data: msg.Data(), + Events: msg.Events(), + }) + wctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + err = ctx.WSConn.WriteRPCResponse(wctx, resp) + cancel() + if err != nil { + env.Logger.Info("Unable to write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } } }() - return &ctypes.ResultSubscribe{}, nil + return &coretypes.ResultSubscribe{}, nil } // Unsubscribe from events via WebSocket. 
// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe -func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { +func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()} env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query) @@ -100,17 +108,17 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // UnsubscribeAll from all events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all -func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { +func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) err := env.EventBus.UnsubscribeAll(ctx.Context(), addr) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } diff --git a/rpc/core/evidence.go b/internal/rpc/core/evidence.go similarity index 69% rename from rpc/core/evidence.go rename to internal/rpc/core/evidence.go index e071c5a7e..a7641b99d 100644 --- a/rpc/core/evidence.go +++ b/internal/rpc/core/evidence.go @@ -3,7 +3,7 @@ package core import ( "fmt" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -12,10 +12,10 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( ctx *rpctypes.Context, - ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { if ev == nil { - return nil, fmt.Errorf("%w: no evidence was provided", ctypes.ErrInvalidRequest) + return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest) } if err := ev.ValidateBasic(); err != nil { @@ -25,5 +25,5 @@ func (env *Environment) BroadcastEvidence( if err := env.EvidencePool.AddEvidence(ev); err != nil { return nil, fmt.Errorf("failed to add evidence: %w", err) } - return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil } diff --git a/rpc/core/health.go b/internal/rpc/core/health.go similarity index 59% rename from rpc/core/health.go rename to internal/rpc/core/health.go index 855911d83..fc355c7e7 100644 --- a/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -1,13 +1,13 @@ package core import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. 
// More: https://docs.tendermint.com/master/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { - return &ctypes.ResultHealth{}, nil +func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { + return &coretypes.ResultHealth{}, nil } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go new file mode 100644 index 000000000..5b12a6c2b --- /dev/null +++ b/internal/rpc/core/mempool.go @@ -0,0 +1,157 @@ +package core + +import ( + "errors" + "fmt" + "math/rand" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- +// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) + +// BroadcastTxAsync returns right away, with no response. Does not wait for +// CheckTx nor DeliverTx results. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{}) + if err != nil { + return nil, err + } + + return &coretypes.ResultBroadcastTx{Hash: tx.Hash()}, nil +} + +// BroadcastTxSync returns with the response from CheckTx. Does not wait for +// DeliverTx result. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync +func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + resCh := make(chan *abci.Response, 1) + err := env.Mempool.CheckTx( + ctx.Context(), + tx, + func(res *abci.Response) { resCh <- res }, + mempool.TxInfo{}, + ) + if err != nil { + return nil, err + } + + res := <-resCh + r := res.GetCheckTx() + + return &coretypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + MempoolError: r.MempoolError, + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. 
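+// Rather than subscribing for the DeliverTx event, this implementation polls
+// the transaction index with linear backoff plus random jitter until the tx
+// is found or the request context ends; it therefore requires the KV event
+// sink to be enabled.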
+// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit +func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll + resCh := make(chan *abci.Response, 1) + err := env.Mempool.CheckTx( + ctx.Context(), + tx, + func(res *abci.Response) { resCh <- res }, + mempool.TxInfo{}, + ) + if err != nil { + return nil, err + } + + r := (<-resCh).GetCheckTx() + + if !indexer.KVSinkEnabled(env.EventSinks) { + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, + errors.New("cannot confirm transaction because kvEventSink is not enabled") + } + + startAt := time.Now() + timer := time.NewTimer(0) + defer timer.Stop() + + count := 0 + for { + count++ + select { + case <-ctx.Context().Done(): + env.Logger.Error("Error on broadcastTxCommit", + "duration", time.Since(startAt), + "err", err) + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, fmt.Errorf("timeout waiting for commit of tx %s (%s)", + tx.Hash(), time.Since(startAt)) + case <-timer.C: + txres, err := env.Tx(ctx, tx.Hash(), false) + if err != nil { + jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec + backoff := 100 * time.Duration(count) * time.Millisecond + timer.Reset(jitter + backoff) + continue + } + + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + DeliverTx: txres.TxResult, + Hash: tx.Hash(), + Height: txres.Height, + }, nil + } + } +} + +// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) +// including their number. +// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs +func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) { + // reuse per_page validator + limit := env.validatePerPage(limitPtr) + + txs := env.Mempool.ReapMaxTxs(limit) + return &coretypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.SizeBytes(), + Txs: txs}, nil +} + +// NumUnconfirmedTxs gets number of unconfirmed transactions. +// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs +func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { + return &coretypes.ResultUnconfirmedTxs{ + Count: env.Mempool.Size(), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.SizeBytes()}, nil +} + +// CheckTx checks the transaction without executing it. The transaction won't +// be added to the mempool either. +// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx +func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { + res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil +} + +func (env *Environment) RemoveTx(ctx *rpctypes.Context, txkey types.TxKey) error { + return env.Mempool.RemoveTxByKey(txkey) +} diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go new file mode 100644 index 000000000..fdf4be69b --- /dev/null +++ b/internal/rpc/core/net.go @@ -0,0 +1,67 @@ +package core + +import ( + "errors" + "fmt" + + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" +) + +// NetInfo returns network info. 
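+// Peers for which the peer manager has no known address are omitted, so
+// NPeers can be smaller than the raw peer count.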
+// More: https://docs.tendermint.com/master/rpc/#/Info/net_info
+func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
+	peerList := env.PeerManager.Peers()
+
+	peers := make([]coretypes.Peer, 0, len(peerList))
+	for _, peer := range peerList {
+		addrs := env.PeerManager.Addresses(peer)
+		if len(addrs) == 0 {
+			continue
+		}
+
+		peers = append(peers, coretypes.Peer{
+			ID:  peer,
+			URL: addrs[0].String(),
+		})
+	}
+
+	return &coretypes.ResultNetInfo{
+		Listening: env.P2PTransport.IsListening(),
+		Listeners: env.P2PTransport.Listeners(),
+		NPeers:    len(peers),
+		Peers:     peers,
+	}, nil
+}
+
+// Genesis returns the genesis file.
+// More: https://docs.tendermint.com/master/rpc/#/Info/genesis
+func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
+	if len(env.genChunks) > 1 {
+		return nil, errors.New("genesis response is large, please use the genesis_chunked API instead")
+	}
+
+	return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil
+}
+
+func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) {
+	if env.genChunks == nil {
+		return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized")
+	}
+
+	if len(env.genChunks) == 0 {
+		return nil, fmt.Errorf("service configuration error, there are no chunks")
+	}
+
+	id := int(chunk)
+
+	if id > len(env.genChunks)-1 {
+		return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks), id)
+	}
+
+	return &coretypes.ResultGenesisChunk{
+		TotalChunks: len(env.genChunks),
+		ChunkNumber: id,
+		Data:        env.genChunks[id],
+	}, nil
+}
diff --git a/rpc/core/routes.go b/internal/rpc/core/routes.go
similarity index 93%
rename from rpc/core/routes.go
rename to internal/rpc/core/routes.go
index 1eb50fe4e..fe99d2118 100644
--- a/rpc/core/routes.go
+++ b/internal/rpc/core/routes.go
@@ -28,6 +28,7 @@ func (env *Environment) GetRoutes() RoutesMap {
 		"block_results":    rpc.NewRPCFunc(env.BlockResults, "height", true),
 		"commit":           rpc.NewRPCFunc(env.Commit, "height", true),
 		"check_tx":         rpc.NewRPCFunc(env.CheckTx, "tx", true),
+		"remove_tx":        rpc.NewRPCFunc(env.RemoveTx, "txkey", false),
 		"tx":               rpc.NewRPCFunc(env.Tx, "hash,prove", true),
 		"tx_search":        rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false),
 		"block_search":     rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false),
@@ -55,7 +56,5 @@ func (env *Environment) GetRoutes() RoutesMap {
 
 // AddUnsafe adds unsafe routes.
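 // The legacy dial_seeds and dial_peers routes were removed along with the
 // old p2p stack; unsafe_flush_mempool is the only remaining unsafe route.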
func (env *Environment) AddUnsafe(routes RoutesMap) { // control API - routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false) - routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false) routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) } diff --git a/rpc/core/status.go b/internal/rpc/core/status.go similarity index 77% rename from rpc/core/status.go rename to internal/rpc/core/status.go index 815ab37f5..b883c6dc2 100644 --- a/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -5,7 +5,7 @@ import ( "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -13,7 +13,7 @@ import ( // Status returns Tendermint status including node info, pubkey, latest block // hash, app hash, block height, current max peer block height, and time. // More: https://docs.tendermint.com/master/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { +func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -50,17 +50,18 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil { votingPower = val.VotingPower } - validatorInfo := ctypes.ValidatorInfo{} + validatorInfo := coretypes.ValidatorInfo{} if env.PubKey != nil { - validatorInfo = ctypes.ValidatorInfo{ + validatorInfo = coretypes.ValidatorInfo{ Address: env.PubKey.Address(), PubKey: env.PubKey, VotingPower: votingPower, } } - result := &ctypes.ResultStatus{ + + result := &coretypes.ResultStatus{ NodeInfo: env.P2PTransport.NodeInfo(), - SyncInfo: ctypes.SyncInfo{ + SyncInfo: coretypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, LatestBlockHeight: latestHeight, @@ -77,6 +78,16 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err ValidatorInfo: validatorInfo, } + if env.StateSyncMetricer != nil { + result.SyncInfo.TotalSnapshots = env.StateSyncMetricer.TotalSnapshots() + result.SyncInfo.ChunkProcessAvgTime = env.StateSyncMetricer.ChunkProcessAvgTime() + result.SyncInfo.SnapshotHeight = env.StateSyncMetricer.SnapshotHeight() + result.SyncInfo.SnapshotChunksCount = env.StateSyncMetricer.SnapshotChunksCount() + result.SyncInfo.SnapshotChunksTotal = env.StateSyncMetricer.SnapshotChunksTotal() + result.SyncInfo.BackFilledBlocks = env.StateSyncMetricer.BackFilledBlocks() + result.SyncInfo.BackFillBlocksTotal = env.StateSyncMetricer.BackFillBlocksTotal() + } + return result, nil } diff --git a/rpc/core/tx.go b/internal/rpc/core/tx.go similarity index 86% rename from rpc/core/tx.go rename to internal/rpc/core/tx.go index eb6c73858..7ba2bf90c 100644 --- a/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -5,12 +5,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - 
"github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -18,7 +18,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. // More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) { +func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { // if index is disabled, return error // N.B. The hash parameter is HexBytes so that the reflective parameter @@ -45,7 +45,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } - return &ctypes.ResultTx{ + return &coretypes.ResultTx{ Hash: hash, Height: height, Index: index, @@ -68,10 +68,12 @@ func (env *Environment) TxSearch( prove bool, pagePtr, perPagePtr *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } q, err := tmquery.New(query) @@ -103,7 +105,7 @@ func (env *Environment) TxSearch( return results[i].Height < results[j].Height }) default: - return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest) + return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest) } // paginate results @@ -118,7 +120,7 @@ func (env *Environment) TxSearch( skipCount := validateSkipCount(page, perPage) pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - apiResults := make([]*ctypes.ResultTx, 0, pageSize) + apiResults := make([]*coretypes.ResultTx, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { r := results[i] @@ -128,7 +130,7 @@ func (env *Environment) TxSearch( proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } - apiResults = append(apiResults, &ctypes.ResultTx{ + apiResults = append(apiResults, &coretypes.ResultTx{ Hash: types.Tx(r.Tx).Hash(), Height: r.Height, Index: r.Index, @@ -138,7 +140,7 @@ func (env *Environment) TxSearch( }) } - return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil + return &coretypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } } diff --git a/state/errors.go b/internal/state/errors.go similarity index 100% rename from state/errors.go rename to internal/state/errors.go diff --git a/state/execution.go b/internal/state/execution.go similarity index 98% rename from state/execution.go rename to internal/state/execution.go index 05d5bdd52..e4a1ba6c3 100644 --- a/state/execution.go +++ b/internal/state/execution.go @@ -7,12 +7,13 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/libs/fail" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - 
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -37,7 +38,7 @@ type BlockExecutor struct { // manage the mempool lock during commit // and update both with block results after commit. - mempool mempl.Mempool + mempool mempool.Mempool evpool EvidencePool logger log.Logger @@ -61,7 +62,7 @@ func NewBlockExecutor( stateStore Store, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool mempl.Mempool, + pool mempool.Mempool, evpool EvidencePool, blockStore BlockStore, options ...BlockExecutorOption, @@ -69,8 +70,8 @@ func NewBlockExecutor( res := &BlockExecutor{ store: stateStore, proxyApp: proxyApp, - eventBus: types.NopEventBus{}, - mempool: mempool, + eventBus: eventbus.NopEventBus{}, + mempool: pool, evpool: evpool, logger: logger, metrics: NopMetrics(), @@ -331,7 +332,7 @@ func execBlockOnProxyApp( byzVals = append(byzVals, evidence.ABCI()...) } - ctx := context.Background() + ctx := context.TODO() // Begin block var err error @@ -424,7 +425,7 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, } // Check if validator's pubkey matches an ABCI type in the consensus params - pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey) + pk, err := encoding.PubKeyFromProto(valUpdate.PubKey) if err != nil { return err } diff --git a/state/execution_test.go b/internal/state/execution_test.go similarity index 82% rename from state/execution_test.go rename to internal/state/execution_test.go index 8e0ec563a..5da5adbb5 100644 --- a/state/execution_test.go +++ b/internal/state/execution_test.go @@ -8,23 +8,26 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" mmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) var ( @@ -34,16 +37,20 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ 
:= makeState(1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) block := sf.MakeBlock(state, 1, new(types.Commit)) @@ -58,12 +65,15 @@ func TestApplyBlock(t *testing.T) { // TestBeginBlockValidators ensures we send absent validators list. func TestBeginBlockValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) stateStore := sm.NewStore(stateDB) @@ -121,12 +131,14 @@ func TestBeginBlockValidators(t *testing.T) { // TestBeginBlockByzantineValidators ensures we send byzantine validators list. func TestBeginBlockByzantineValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -217,9 +229,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) { func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey() - pk1, err := cryptoenc.PubKeyToProto(pubkey1) + pk1, err := encoding.PubKeyToProto(pubkey1) assert.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) + pk2, err := encoding.PubKeyToProto(pubkey2) assert.NoError(t, err) defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} @@ -277,9 +289,9 @@ func TestUpdateValidators(t *testing.T) { pubkey2 := ed25519.GenPrivKey().PubKey() val2 := types.NewValidator(pubkey2, 20) - pk, err := cryptoenc.PubKeyToProto(pubkey1) + pk, err := encoding.PubKeyToProto(pubkey1) require.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) + pk2, err := encoding.PubKeyToProto(pubkey2) require.NoError(t, err) testCases := []struct { @@ -346,12 +358,15 @@ func TestUpdateValidators(t *testing.T) { // TestEndBlockValidatorUpdates ensures we update validator set and send an event. 
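 // The test subscribes through eventbus.SubscribeWithArgs and consumes the
 // update with the subscription's Next method under a one-second timeout.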
func TestEndBlockValidatorUpdates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -359,32 +374,31 @@ func TestEndBlockValidatorUpdates(t *testing.T) { blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), + logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore, ) - eventBus := types.NewEventBus() - err = eventBus.Start() + eventBus := eventbus.NewDefault(logger) + err = eventBus.Start(ctx) require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests blockExec.SetEventBus(eventBus) - updatesSub, err := eventBus.Subscribe( - context.Background(), - "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, - ) + updatesSub, err := eventBus.SubscribeWithArgs(context.Background(), pubsub.SubscribeArgs{ + ClientID: "TestEndBlockValidatorUpdates", + Query: types.EventQueryValidatorSetUpdates, + }) require.NoError(t, err) block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() - pk, err := cryptoenc.PubKeyToProto(pubkey) + pk, err := encoding.PubKeyToProto(pubkey) require.NoError(t, err) app.ValidatorUpdates = []abci.ValidatorUpdate{ {PubKey: pk, Power: 10}, @@ -401,30 +415,30 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } // test we threw an event - select { - case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdates) - require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) - if assert.NotEmpty(t, event.ValidatorUpdates) { - assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) - assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) - } - case <-updatesSub.Canceled(): - t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) - case <-time.After(1 * time.Second): - t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := updatesSub.Next(ctx) + require.NoError(t, err) + event, ok := msg.Data().(types.EventDataValidatorSetUpdates) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) + if assert.NotEmpty(t, event.ValidatorUpdates) { + assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) } } // TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that // would result in empty set causes no panic, an error is raised and NextValidators is not updated func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err := 
proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -441,7 +455,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) + vp, err := encoding.PubKeyToProto(state.Validators.Validators[0].PubKey) require.NoError(t, err) // Remove the only validator app.ValidatorUpdates = []abci.ValidatorUpdate{ diff --git a/state/export_test.go b/internal/state/export_test.go similarity index 100% rename from state/export_test.go rename to internal/state/export_test.go diff --git a/state/helpers_test.go b/internal/state/helpers_test.go similarity index 93% rename from state/helpers_test.go rename to internal/state/helpers_test.go index 8f54c8e45..acc8744d9 100644 --- a/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -8,18 +8,20 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) @@ -30,8 +32,8 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - return proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + return proxy.NewAppConns(cc, log.NewNopLogger(), proxy.NopMetrics()) } func makeAndCommitGoodBlock( @@ -148,11 +150,11 @@ func makeHeaderPartsResponsesValPubKeyChange( // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { - vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey) + vPbPk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } - pbPk, err := cryptoenc.PubKeyToProto(pubkey) + pbPk, err := encoding.PubKeyToProto(pubkey) if err != nil { panic(err) } @@ -181,7 +183,7 @@ func makeHeaderPartsResponsesValPowerChange( // If the pubkey is new, remove the old and add the new. 
_, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { - vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey) + vPbPk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } diff --git a/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go similarity index 99% rename from state/indexer/block/kv/kv.go rename to internal/state/indexer/block/kv/kv.go index bc90eadf5..d52f06c96 100644 --- a/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go similarity index 94% rename from state/indexer/block/kv/kv_test.go rename to internal/state/indexer/block/kv/kv_test.go index 2a342f870..024df332c 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -6,15 +6,16 @@ import ( "testing" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" + blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestBlockIndexer(t *testing.T) { - store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events")) indexer := blockidxkv.New(store) require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ diff --git a/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go similarity index 100% rename from state/indexer/block/kv/util.go rename to internal/state/indexer/block/kv/util.go diff --git a/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go similarity index 91% rename from state/indexer/block/null/null.go rename to internal/state/indexer/block/null/null.go index d36d8680e..9b28d93bb 100644 --- a/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/doc.go b/internal/state/indexer/doc.go similarity index 100% rename from state/indexer/doc.go rename to internal/state/indexer/doc.go diff --git a/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go similarity index 96% rename from state/indexer/eventsink.go rename to internal/state/indexer/eventsink.go index d7c9d10e0..dba50b6af 100644 --- a/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -16,7 +16,7 @@ const ( PSQL EventSinkType = "psql" ) -//go:generate ../../scripts/mockery_generate.sh EventSink +//go:generate ../../../scripts/mockery_generate.sh EventSink // EventSink interface is defined the APIs for the IndexerService to interact with the data store, // including the block/transaction indexing and the search functions. 
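For orientation, the relocated indexer packages and the service introduced in the files below assemble roughly as follows. This is a minimal sketch only, assuming a caller inside the tendermint module (these packages are internal) and a throwaway in-memory KV sink; variable names are hypothetical, and every call used here appears elsewhere in this diff:

	package main

	import (
		"context"

		dbm "github.com/tendermint/tm-db"

		"github.com/tendermint/tendermint/internal/eventbus"
		"github.com/tendermint/tendermint/internal/state/indexer"
		"github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
		"github.com/tendermint/tendermint/libs/log"
	)

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		logger := log.NewNopLogger()

		// The event bus is the indexer's source of block headers and tx results.
		bus := eventbus.NewDefault(logger)
		if err := bus.Start(ctx); err != nil {
			panic(err)
		}

		// A throwaway in-memory KV sink; Metrics is omitted, so NewService
		// falls back to NopMetrics.
		svc := indexer.NewService(indexer.ServiceArgs{
			Sinks:    []indexer.EventSink{kv.NewEventSink(dbm.NewMemDB())},
			EventBus: bus,
			Logger:   logger,
		})
		if err := svc.Start(ctx); err != nil {
			panic(err)
		}
	}
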
diff --git a/state/indexer/indexer.go b/internal/state/indexer/indexer.go similarity index 89% rename from state/indexer/indexer.go rename to internal/state/indexer/indexer.go index 24dc62d70..5c238e137 100644 --- a/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -41,26 +41,26 @@ type BlockIndexer interface { // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []*abci.TxResult + Ops []*abci.TxResult + Pending int64 } // NewBatch creates a new Batch. func NewBatch(n int64) *Batch { - return &Batch{ - Ops: make([]*abci.TxResult, n), - } + return &Batch{Ops: make([]*abci.TxResult, n), Pending: n} } // Add or update an entry for the given result.Index. func (b *Batch) Add(result *abci.TxResult) error { - b.Ops[result.Index] = result + if b.Ops[result.Index] == nil { + b.Pending-- + b.Ops[result.Index] = result + } return nil } // Size returns the total number of operations inside the batch. -func (b *Batch) Size() int { - return len(b.Ops) -} +func (b *Batch) Size() int { return len(b.Ops) } // ErrorEmptyHash indicates empty hash var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go new file mode 100644 index 000000000..80c4adf02 --- /dev/null +++ b/internal/state/indexer/indexer_service.go @@ -0,0 +1,169 @@ +package indexer + +import ( + "context" + "time" + + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +// Service connects event bus, transaction and block indexers together in +// order to index transactions and blocks coming from the event bus. +type Service struct { + service.BaseService + + eventSinks []EventSink + eventBus *eventbus.EventBus + metrics *Metrics + + currentBlock struct { + header types.EventDataNewBlockHeader + height int64 + batch *Batch + } +} + +// NewService constructs a new indexer service from the given arguments. +func NewService(args ServiceArgs) *Service { + is := &Service{ + eventSinks: args.Sinks, + eventBus: args.EventBus, + metrics: args.Metrics, + } + if is.metrics == nil { + is.metrics = NopMetrics() + } + is.BaseService = *service.NewBaseService(args.Logger, "IndexerService", is) + return is +} + +// publish publishes a pubsub message to the service. The service blocks until +// the message has been fully processed. +func (is *Service) publish(msg pubsub.Message) error { + // Indexing has three states. Initially, no block is in progress (WAIT) and + // we expect a block header. Upon seeing a header, we are waiting for zero + // or more transactions (GATHER). Once all the expected transactions have + // been delivered (in some order), we are ready to index. After indexing a + // block, we revert to the WAIT state for the next block. + + if is.currentBlock.batch == nil { + // WAIT: Start a new block. + hdr := msg.Data().(types.EventDataNewBlockHeader) + is.currentBlock.header = hdr + is.currentBlock.height = hdr.Header.Height + is.currentBlock.batch = NewBatch(hdr.NumTxs) + + if hdr.NumTxs != 0 { + return nil + } + // If the block does not expect any transactions, fall through and index + // it immediately. 
This shouldn't happen, but this check ensures we do
+		// not get stuck if it does.
+	}
+
+	curr := is.currentBlock.batch
+	if curr.Pending != 0 {
+		// GATHER: Accumulate a transaction into the current block's batch.
+		txResult := msg.Data().(types.EventDataTx).TxResult
+		if err := curr.Add(&txResult); err != nil {
+			is.Logger.Error("failed to add tx to batch",
+				"height", is.currentBlock.height, "index", txResult.Index, "err", err)
+		}
+
+		// This may have been the last transaction in the batch, so fall through
+		// to check whether it is time to index.
+	}
+
+	if curr.Pending == 0 {
+		// INDEX: We have all the transactions we expect for the current block.
+		for _, sink := range is.eventSinks {
+			start := time.Now()
+			if err := sink.IndexBlockEvents(is.currentBlock.header); err != nil {
+				is.Logger.Error("failed to index block header",
+					"height", is.currentBlock.height, "err", err)
+			} else {
+				is.metrics.BlockEventsSeconds.Observe(time.Since(start).Seconds())
+				is.metrics.BlocksIndexed.Add(1)
+				is.Logger.Debug("indexed block",
+					"height", is.currentBlock.height, "sink", sink.Type())
+			}
+
+			if curr.Size() != 0 {
+				start := time.Now()
+				err := sink.IndexTxEvents(curr.Ops)
+				if err != nil {
+					is.Logger.Error("failed to index block txs",
+						"height", is.currentBlock.height, "err", err)
+				} else {
+					is.metrics.TxEventsSeconds.Observe(time.Since(start).Seconds())
+					is.metrics.TransactionsIndexed.Add(float64(curr.Size()))
+					is.Logger.Debug("indexed txs",
+						"height", is.currentBlock.height, "sink", sink.Type())
+				}
+			}
+		}
+		is.currentBlock.batch = nil // return to the WAIT state for the next block
+	}
+
+	return nil
+}
+
+// OnStart implements part of service.Service. It registers an observer for the
+// indexer if the underlying event sinks support indexing.
+//
+// TODO(creachadair): Can we get rid of the "enabled" check?
+func (is *Service) OnStart(ctx context.Context) error {
+	// If the event sinks support indexing, register an observer to capture
+	// block header data for the indexer.
+	if IndexingEnabled(is.eventSinks) {
+		err := is.eventBus.Observe(context.TODO(), is.publish,
+			types.EventQueryNewBlockHeader, types.EventQueryTx)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// OnStop implements service.Service by closing the event sinks.
+func (is *Service) OnStop() {
+	for _, sink := range is.eventSinks {
+		if err := sink.Stop(); err != nil {
+			is.Logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err)
+		}
+	}
+}
+
+// ServiceArgs are arguments for constructing a new indexer service.
+type ServiceArgs struct {
+	Sinks    []EventSink
+	EventBus *eventbus.EventBus
+	Metrics  *Metrics
+	Logger   log.Logger
+}
+
+// KVSinkEnabled reports whether the given event sinks include a KV event sink.
+func KVSinkEnabled(sinks []EventSink) bool {
+	for _, sink := range sinks {
+		if sink.Type() == KV {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IndexingEnabled reports whether the given event sinks support indexing.
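+//
+// For example (an illustrative sketch only; sinks, bus, logger, and ctx are
+// assumed to exist in the caller), these checks can gate construction of the
+// service:
+//
+//	if IndexingEnabled(sinks) {
+//		svc := NewService(ServiceArgs{Sinks: sinks, EventBus: bus, Logger: logger})
+//		if err := svc.Start(ctx); err != nil {
+//			// handle startup failure
+//		}
+//	}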
+func IndexingEnabled(sinks []EventSink) bool {
+	for _, sink := range sinks {
+		if sink.Type() == KV || sink.Type() == PSQL {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go
similarity index 80%
rename from state/indexer/indexer_service_test.go
rename to internal/state/indexer/indexer_service_test.go
index 4d12cc86f..879cf8820 100644
--- a/state/indexer/indexer_service_test.go
+++ b/internal/state/indexer/indexer_service_test.go
@@ -1,27 +1,30 @@
 package indexer_test
 
 import (
+	"context"
 	"database/sql"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
 
 	"github.com/adlio/schema"
-	_ "github.com/lib/pq"
-	dockertest "github.com/ory/dockertest"
+	"github.com/ory/dockertest"
 	"github.com/ory/dockertest/docker"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"
 
 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/eventbus"
+	"github.com/tendermint/tendermint/internal/state/indexer"
+	"github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
+	"github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
 	tmlog "github.com/tendermint/tendermint/libs/log"
-	indexer "github.com/tendermint/tendermint/state/indexer"
-	kv "github.com/tendermint/tendermint/state/indexer/sink/kv"
-	psql "github.com/tendermint/tendermint/state/indexer/sink/psql"
 	"github.com/tendermint/tendermint/types"
-	db "github.com/tendermint/tm-db"
+
+	// Register the Postgres database driver.
+	_ "github.com/lib/pq"
 )
 
 var psqldb *sql.DB
@@ -36,17 +39,24 @@ var (
 	dbName = "postgres"
 )
 
-func TestIndexerServiceIndexesBlocks(t *testing.T) {
-	// event bus
-	eventBus := types.NewEventBus()
-	eventBus.SetLogger(tmlog.TestingLogger())
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
+// NewIndexerService returns a new service instance.
+func NewIndexerService(es []indexer.EventSink, eventBus *eventbus.EventBus) *indexer.Service {
+	return indexer.NewService(indexer.ServiceArgs{
+		Sinks:    es,
+		EventBus: eventBus,
 	})
+}
+
+func TestIndexerServiceIndexesBlocks(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	logger := tmlog.TestingLogger()
+	// event bus
+	eventBus := eventbus.NewDefault(logger)
+	err := eventBus.Start(ctx)
+	require.NoError(t, err)
+	t.Cleanup(eventBus.Wait)
 
 	assert.False(t, indexer.KVSinkEnabled([]indexer.EventSink{}))
 	assert.False(t, indexer.IndexingEnabled([]indexer.EventSink{}))
@@ -55,20 +65,14 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 	pool, err := setupDB(t)
 	assert.Nil(t, err)
 
-	store := db.NewMemDB()
+	store := dbm.NewMemDB()
 	eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink}
 	assert.True(t, indexer.KVSinkEnabled(eventSinks))
 	assert.True(t, indexer.IndexingEnabled(eventSinks))
 
-	service := indexer.NewIndexerService(eventSinks, eventBus)
-	service.SetLogger(tmlog.TestingLogger())
-	err = service.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := service.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
+	service := NewIndexerService(eventSinks, eventBus)
+	require.NoError(t, service.Start(ctx))
+	t.Cleanup(service.Wait)
 
 	// publish block with txs
 	err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
@@ -112,7 +116,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 func readSchema() ([]*schema.Migration, error) {
 	filename := "./sink/psql/schema.sql"
-	contents, err := ioutil.ReadFile(filename)
+	contents, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
 	}
diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go
new file mode 100644
index 000000000..aa64a4bb2
--- /dev/null
+++ b/internal/state/indexer/metrics.go
@@ -0,0 +1,73 @@
+package indexer
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// MetricsSubsystem is the subsystem label for the indexer package.
+const MetricsSubsystem = "indexer"
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Latency for indexing block events.
+	BlockEventsSeconds metrics.Histogram
+
+	// Latency for indexing transaction events.
+	TxEventsSeconds metrics.Histogram
+
+	// Number of complete blocks indexed.
+	BlocksIndexed metrics.Counter
+
+	// Number of transactions indexed.
+	TransactionsIndexed metrics.Counter
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+// Optionally, labels can be provided along with their values ("foo",
+// "fooValue").
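+//
+// For example (illustrative only; "my-chain" is a hypothetical label value):
+//
+//	m := PrometheusMetrics("tendermint", "chain_id", "my-chain")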
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_events_seconds", + Help: "Latency for indexing block events.", + }, labels).With(labelsAndValues...), + TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_events_seconds", + Help: "Latency for indexing transaction events.", + }, labels).With(labelsAndValues...), + BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "blocks_indexed", + Help: "Number of complete blocks indexed.", + }, labels).With(labelsAndValues...), + TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "transactions_indexed", + Help: "Number of transactions indexed.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns an indexer metrics stub that discards all samples. +func NopMetrics() *Metrics { + return &Metrics{ + BlockEventsSeconds: discard.NewHistogram(), + TxEventsSeconds: discard.NewHistogram(), + BlocksIndexed: discard.NewCounter(), + TransactionsIndexed: discard.NewCounter(), + } +} diff --git a/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go similarity index 98% rename from state/indexer/mocks/event_sink.go rename to internal/state/indexer/mocks/event_sink.go index ce5b8ace5..98b32e935 100644 --- a/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/indexer/query_range.go b/internal/state/indexer/query_range.go similarity index 100% rename from state/indexer/query_range.go rename to internal/state/indexer/query_range.go diff --git a/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go similarity index 78% rename from state/indexer/sink/kv/kv.go rename to internal/state/indexer/sink/kv/kv.go index 7d51640d8..fe7068a1b 100644 --- a/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -3,13 +3,14 @@ package kv import ( "context" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvb "github.com/tendermint/tendermint/state/indexer/block/kv" - kvt "github.com/tendermint/tendermint/state/indexer/tx/kv" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" + kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" ) var _ indexer.EventSink = (*EventSink)(nil) @@ -17,14 +18,16 @@ var _ indexer.EventSink = (*EventSink)(nil) // The EventSink is an aggregator for redirecting the call path of 
the tx/block kvIndexer. // For the implementation details please see the kv.go in the indexer/block and indexer/tx folder. type EventSink struct { - txi *kvt.TxIndex - bi *kvb.BlockerIndexer + txi *kvt.TxIndex + bi *kvb.BlockerIndexer + store dbm.DB } func NewEventSink(store dbm.DB) indexer.EventSink { return &EventSink{ - txi: kvt.NewTxIndex(store), - bi: kvb.New(store), + txi: kvt.NewTxIndex(store), + bi: kvb.New(store), + store: store, } } @@ -57,5 +60,5 @@ func (kves *EventSink) HasBlock(h int64) (bool, error) { } func (kves *EventSink) Stop() error { - return nil + return kves.store.Close() } diff --git a/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go similarity index 95% rename from state/indexer/sink/kv/kv_test.go rename to internal/state/indexer/sink/kv/kv_test.go index a5d2dd81e..7d7552946 100644 --- a/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -5,29 +5,30 @@ import ( "fmt" "testing" + dbm "github.com/tendermint/tm-db" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvtx "github.com/tendermint/tendermint/state/indexer/tx/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestType(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Equal(t, indexer.KV, kvSink.Type()) } func TestStop(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Nil(t, kvSink.Stop()) } func TestBlockFuncs(t *testing.T) { - store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events")) indexer := NewEventSink(store) require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ @@ -158,7 +159,7 @@ func TestBlockFuncs(t *testing.T) { } func TestTxSearchWithCancelation(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -180,7 +181,7 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - esdb := db.NewMemDB() + esdb := dbm.NewMemDB() indexer := NewEventSink(esdb) // index tx using events indexing (composite key) @@ -260,7 +261,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -282,7 +283,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) // indexed first, but bigger height (to test the order of transactions) txResult := txResultWithEvents([]abci.Event{ diff --git a/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go similarity index 94% rename from 
state/indexer/sink/null/null.go rename to internal/state/indexer/sink/null/null.go index b5ad93ab4..f58142f21 100644 --- a/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/null/null_test.go b/internal/state/indexer/sink/null/null_test.go similarity index 93% rename from state/indexer/sink/null/null_test.go rename to internal/state/indexer/sink/null/null_test.go index eef63fd6e..15b77dc55 100644 --- a/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go similarity index 99% rename from state/indexer/sink/psql/psql.go rename to internal/state/indexer/sink/psql/psql.go index e452ed406..4db6f4435 100644 --- a/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -11,8 +11,8 @@ import ( "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go similarity index 97% rename from state/indexer/sink/psql/psql_test.go rename to internal/state/indexer/sink/psql/psql_test.go index e8a1ce833..650579f9b 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -5,7 +5,6 @@ import ( "database/sql" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -19,7 +18,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" // Register the Postgres database driver. 
@@ -144,6 +143,9 @@ func TestType(t *testing.T) { } func TestIndexing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("IndexBlockEvents", func(t *testing.T) { indexer := &EventSink{store: testDB(), chainID: chainID} require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) @@ -155,7 +157,7 @@ func TestIndexing(t *testing.T) { verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) verifyNotImplemented(t, "block search", func() (bool, error) { - v, err := indexer.SearchBlockEvents(context.Background(), nil) + v, err := indexer.SearchBlockEvents(ctx, nil) return v != nil, err }) @@ -189,7 +191,7 @@ func TestIndexing(t *testing.T) { return txr != nil, err }) verifyNotImplemented(t, "tx search", func() (bool, error) { - txr, err := indexer.SearchTxEvents(context.Background(), nil) + txr, err := indexer.SearchTxEvents(ctx, nil) return txr != nil, err }) @@ -227,7 +229,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { // readSchema loads the indexing database schema file func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } diff --git a/state/indexer/sink/psql/schema.sql b/internal/state/indexer/sink/psql/schema.sql similarity index 100% rename from state/indexer/sink/psql/schema.sql rename to internal/state/indexer/sink/psql/schema.sql diff --git a/state/indexer/sink/sink.go b/internal/state/indexer/sink/sink.go similarity index 85% rename from state/indexer/sink/sink.go rename to internal/state/indexer/sink/sink.go index f9dfa54df..b4c41ec31 100644 --- a/state/indexer/sink/sink.go +++ b/internal/state/indexer/sink/sink.go @@ -5,10 +5,10 @@ import ( "strings" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink/kv" - "github.com/tendermint/tendermint/state/indexer/sink/null" - "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/null" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" ) // EventSinksFromConfig constructs a slice of indexer.EventSink using the provided diff --git a/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go similarity index 99% rename from state/indexer/tx/kv/kv.go rename to internal/state/indexer/tx/kv/kv.go index 080dbce2c..f0550f8f3 100644 --- a/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go similarity index 95% rename from state/indexer/tx/kv/kv_bench_test.go rename to internal/state/indexer/tx/kv/kv_bench_test.go index 3f4e63ee1..e8504ebcc 100644 --- a/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rand" "fmt" - 
"io/ioutil" + "os" "testing" dbm "github.com/tendermint/tm-db" @@ -15,7 +15,7 @@ import ( ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") + dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") if err != nil { b.Errorf("failed to create temporary directory: %s", err) } diff --git a/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go similarity index 96% rename from state/indexer/tx/kv/kv_test.go rename to internal/state/indexer/tx/kv/kv_test.go index dd63dd9a4..985d58f42 100644 --- a/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -3,25 +3,23 @@ package kv import ( "context" "fmt" - "io/ioutil" "os" "testing" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - db "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) func TestTxIndex(t *testing.T) { - txIndexer := NewTxIndex(db.NewMemDB()) + txIndexer := NewTxIndex(dbm.NewMemDB()) tx := types.Tx("HELLO WORLD") txResult := &abci.TxResult{ @@ -67,7 +65,7 @@ func TestTxIndex(t *testing.T) { } func TestTxSearch(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -147,7 +145,7 @@ func TestTxSearch(t *testing.T) { } func TestTxSearchWithCancelation(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -165,7 +163,7 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) // index tx using events indexing (composite key) txResult1 := txResultWithEvents([]abci.Event{ @@ -244,7 +242,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -266,7 +264,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) // indexed first, but bigger height (to test the order of transactions) txResult := txResultWithEvents([]abci.Event{ @@ -335,11 +333,11 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := ioutil.TempDir("", "tx_index_db") + dir, err := os.MkdirTemp("", "tx_index_db") require.NoError(b, err) defer os.RemoveAll(dir) - store, err := db.NewDB("tx_index", "goleveldb", dir) + store, err := dbm.NewDB("tx_index", "goleveldb", dir) require.NoError(b, err) txIndexer := NewTxIndex(store) diff --git a/state/indexer/tx/kv/utils.go 
b/internal/state/indexer/tx/kv/utils.go similarity index 100% rename from state/indexer/tx/kv/utils.go rename to internal/state/indexer/tx/kv/utils.go diff --git a/state/indexer/tx/kv/utils_test.go b/internal/state/indexer/tx/kv/utils_test.go similarity index 100% rename from state/indexer/tx/kv/utils_test.go rename to internal/state/indexer/tx/kv/utils_test.go diff --git a/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go similarity index 93% rename from state/indexer/tx/null/null.go rename to internal/state/indexer/tx/null/null.go index d92ed489e..0da7fc683 100644 --- a/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/state/metrics.go b/internal/state/metrics.go similarity index 100% rename from state/metrics.go rename to internal/state/metrics.go diff --git a/state/mocks/block_store.go b/internal/state/mocks/block_store.go similarity index 100% rename from state/mocks/block_store.go rename to internal/state/mocks/block_store.go diff --git a/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go similarity index 98% rename from state/mocks/event_sink.go rename to internal/state/mocks/event_sink.go index 749515ccf..b8a8fc464 100644 --- a/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go similarity index 96% rename from state/mocks/evidence_pool.go rename to internal/state/mocks/evidence_pool.go index bb33547d2..8bf4a9b64 100644 --- a/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -4,8 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" - + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/state/mocks/store.go b/internal/state/mocks/store.go similarity index 98% rename from state/mocks/store.go rename to internal/state/mocks/store.go index 750bf7f29..4452f9bec 100644 --- a/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,8 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" - + state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" types "github.com/tendermint/tendermint/types" diff --git a/internal/state/rollback.go b/internal/state/rollback.go new file mode 100644 index 000000000..e78957b02 --- /dev/null +++ b/internal/state/rollback.go @@ -0,0 +1,104 @@ +package state + +import ( + "errors" + "fmt" + + "github.com/tendermint/tendermint/version" +) + +// Rollback overwrites the current Tendermint state (height n) with the most +// recent previous state (height n - 1). +// Note that this function does not affect application state. 
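+//
+// For example (illustrative only; blockStore and stateStore stand for the
+// node's block store and state store):
+//
+//	height, appHash, err := Rollback(blockStore, stateStore)
+//	if err != nil {
+//		// the stores were inconsistent, or no state was found
+//	}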
+func Rollback(bs BlockStore, ss Store) (int64, []byte, error) {
+	invalidState, err := ss.Load()
+	if err != nil {
+		return -1, nil, err
+	}
+	if invalidState.IsEmpty() {
+		return -1, nil, errors.New("no state found")
+	}
+
+	height := bs.Height()
+
+	// NOTE: persistence of state and blocks doesn't happen atomically. Therefore it is possible that
+	// when the user stopped the node the state wasn't updated but the blockstore was. In this situation
+	// we don't need to roll back any state and can just return early
+	if height == invalidState.LastBlockHeight+1 {
+		return invalidState.LastBlockHeight, invalidState.AppHash, nil
+	}
+
+	// If the state store height is neither one below nor equal to the blockstore height, then this
+	// violates the invariant
+	if height != invalidState.LastBlockHeight {
+		return -1, nil, fmt.Errorf("statestore height (%d) is not one below or equal to blockstore height (%d)",
+			invalidState.LastBlockHeight, height)
+	}
+
+	// state store height is equal to blockstore height. We're good to proceed with rolling back state
+	rollbackHeight := invalidState.LastBlockHeight
+	rollbackBlock := bs.LoadBlockMeta(rollbackHeight)
+	if rollbackBlock == nil {
+		return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight)
+	}
+
+	previousValidatorSet, err := ss.LoadValidators(rollbackHeight - 1)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	previousParams, err := ss.LoadConsensusParams(rollbackHeight)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	valChangeHeight := invalidState.LastHeightValidatorsChanged
+	// this can only happen if the validator set changed since the last block
+	if valChangeHeight > rollbackHeight {
+		valChangeHeight = rollbackHeight
+	}
+
+	paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged
+	// this can only happen if params changed from the last block
+	if paramsChangeHeight > rollbackHeight {
+		paramsChangeHeight = rollbackHeight
+	}
+
+	// build the new state from the old state and the prior block
+	rolledBackState := State{
+		Version: Version{
+			Consensus: version.Consensus{
+				Block: version.BlockProtocol,
+				App:   previousParams.Version.AppVersion,
+			},
+			Software: version.TMVersion,
+		},
+		// immutable fields
+		ChainID:       invalidState.ChainID,
+		InitialHeight: invalidState.InitialHeight,
+
+		LastBlockHeight: invalidState.LastBlockHeight - 1,
+		LastBlockID:     rollbackBlock.Header.LastBlockID,
+		LastBlockTime:   rollbackBlock.Header.Time,
+
+		NextValidators:              invalidState.Validators,
+		Validators:                  invalidState.LastValidators,
+		LastValidators:              previousValidatorSet,
+		LastHeightValidatorsChanged: valChangeHeight,
+
+		ConsensusParams:                  previousParams,
+		LastHeightConsensusParamsChanged: paramsChangeHeight,
+
+		LastResultsHash: rollbackBlock.Header.LastResultsHash,
+		AppHash:         rollbackBlock.Header.AppHash,
+	}
+
+	// persist the new state. This overrides the invalid one.
NOTE: this will also + // persist the validator set and consensus params over the existing structures, + // but both should be the same + if err := ss.Save(rolledBackState); err != nil { + return -1, nil, fmt.Errorf("failed to save rolled back state: %w", err) + } + + return rolledBackState.LastBlockHeight, rolledBackState.AppHash, nil +} diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go new file mode 100644 index 000000000..f7ac7fe7e --- /dev/null +++ b/internal/state/rollback_test.go @@ -0,0 +1,133 @@ +package state_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +func TestRollback(t *testing.T) { + var ( + height int64 = 100 + appVersion uint64 = 10 + ) + blockStore := &mocks.BlockStore{} + stateStore := setupStateStore(t, height) + initialState, err := stateStore.Load() + require.NoError(t, err) + + height++ + block := &types.BlockMeta{ + Header: types.Header{ + Height: height, + AppHash: initialState.AppHash, + LastBlockID: initialState.LastBlockID, + LastResultsHash: initialState.LastResultsHash, + }, + } + blockStore.On("LoadBlockMeta", height).Return(block) + blockStore.On("Height").Return(height) + + // perform the rollback over a version bump + appVersion++ + newParams := types.DefaultConsensusParams() + newParams.Version.AppVersion = appVersion + newParams.Block.MaxBytes = 1000 + nextState := initialState.Copy() + nextState.LastBlockHeight = height + nextState.Version.Consensus.App = appVersion + nextState.LastBlockID = factory.MakeBlockID() + nextState.AppHash = factory.RandomHash() + nextState.LastValidators = initialState.Validators + nextState.Validators = initialState.NextValidators + nextState.NextValidators = initialState.NextValidators.CopyIncrementProposerPriority(1) + nextState.ConsensusParams = *newParams + nextState.LastHeightConsensusParamsChanged = height + 1 + nextState.LastHeightValidatorsChanged = height + 1 + + // update the state + require.NoError(t, stateStore.Save(nextState)) + + // rollback the state + rollbackHeight, rollbackHash, err := state.Rollback(blockStore, stateStore) + require.NoError(t, err) + require.EqualValues(t, int64(100), rollbackHeight) + require.EqualValues(t, initialState.AppHash, rollbackHash) + blockStore.AssertExpectations(t) + + // assert that we've recovered the prior state + loadedState, err := stateStore.Load() + require.NoError(t, err) + require.EqualValues(t, initialState, loadedState) +} + +func TestRollbackNoState(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "no state found") +} + +func TestRollbackNoBlocks(t *testing.T) { + const height = int64(100) + stateStore := setupStateStore(t, height) + blockStore := &mocks.BlockStore{} + blockStore.On("Height").Return(height) + blockStore.On("LoadBlockMeta", height).Return(nil) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "block at height 100 not found") +} + +func TestRollbackDifferentStateHeight(t *testing.T) { + const height = int64(100) + stateStore := setupStateStore(t, height) + blockStore := 
&mocks.BlockStore{} + blockStore.On("Height").Return(height + 2) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Equal(t, err.Error(), "statestore height (100) is not one below or equal to blockstore height (102)") +} + +func setupStateStore(t *testing.T, height int64) state.Store { + stateStore := state.NewStore(dbm.NewMemDB()) + valSet, _ := factory.ValidatorSet(5, 10) + + params := types.DefaultConsensusParams() + params.Version.AppVersion = 10 + + initialState := state.State{ + Version: state.Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 10, + }, + Software: version.TMVersion, + }, + ChainID: factory.DefaultTestChainID, + InitialHeight: 10, + LastBlockID: factory.MakeBlockID(), + AppHash: factory.RandomHash(), + LastResultsHash: factory.RandomHash(), + LastBlockHeight: height, + LastValidators: valSet, + Validators: valSet.CopyIncrementProposerPriority(1), + NextValidators: valSet.CopyIncrementProposerPriority(2), + LastHeightValidatorsChanged: height + 1, + ConsensusParams: *params, + LastHeightConsensusParamsChanged: height + 1, + } + require.NoError(t, stateStore.Bootstrap(initialState)) + return stateStore +} diff --git a/state/services.go b/internal/state/services.go similarity index 94% rename from state/services.go rename to internal/state/services.go index c692d0b94..49388cc12 100644 --- a/state/services.go +++ b/internal/state/services.go @@ -9,7 +9,7 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! //------------------------------------------------------ -//go:generate ../scripts/mockery_generate.sh BlockStore +//go:generate ../../scripts/mockery_generate.sh BlockStore //------------------------------------------------------ // blockstore @@ -38,7 +38,7 @@ type BlockStore interface { //----------------------------------------------------------------------------- // evidence pool -//go:generate ../scripts/mockery_generate.sh EvidencePool +//go:generate ../../scripts/mockery_generate.sh EvidencePool // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { diff --git a/state/state.go b/internal/state/state.go similarity index 97% rename from state/state.go rename to internal/state/state.go index 5862162d1..6fd632ff9 100644 --- a/state/state.go +++ b/internal/state/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -64,6 +64,8 @@ func VersionFromProto(v tmstate.Version) Version { // Instead, use state.Copy() or updateState(...). // NOTE: not goroutine-safe. type State struct { + // FIXME: This can be removed as TMVersion is a constant, and version.Consensus should + // eventually be replaced by VersionParams in ConsensusParams Version Version // immutable @@ -193,8 +195,8 @@ func (state *State) ToProto() (*tmstate.State, error) { return sm, nil } -// StateFromProto takes a state proto message & returns the local state type -func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint +// FromProto takes a state proto message & returns the local state type +func FromProto(pb *tmstate.State) (*State, error) { if pb == nil { return nil, errors.New("nil State") } @@ -321,7 +323,7 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) + genDocJSON, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) } diff --git a/state/state_test.go b/internal/state/state_test.go similarity index 95% rename from state/state_test.go rename to internal/state/state_test.go index 99d45bb62..3f989536a 100644 --- a/state/state_test.go +++ b/internal/state/state_test.go @@ -12,36 +12,37 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" + sm "github.com/tendermint/tendermint/internal/state" + statefactory "github.com/tendermint/tendermint/internal/state/test/factory" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - config := cfg.ResetTestRoot("state_") - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg, err := config.ResetTestRoot("state_") + require.NoError(t, err) + + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(t, err) stateStore := sm.NewStore(stateDB) state, err := stateStore.Load() require.NoError(t, err) require.Empty(t, state) - state, err = sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err = sm.MakeGenesisStateFromFile(cfg.GenesisFile()) assert.NoError(t, err) assert.NotNil(t, state) err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } + tearDown := func(t *testing.T) { os.RemoveAll(cfg.RootDir) } return tearDown, stateDB, state } @@ -106,7 +107,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses. 
- block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -114,7 +115,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} - pbpk, err := cryptoenc.PubKeyToProto(ed25519.GenPrivKey().PubKey()) + pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}} @@ -309,6 +310,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } func TestProposerFrequency(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // some explicit test cases testCases := []struct { @@ -369,7 +372,7 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(mrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = mrand.Int63() @@ -448,7 +451,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -465,7 +468,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() val2VotingPower := int64(100) - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + fvp, err := encoding.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} @@ -562,7 +565,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ @@ -583,7 +586,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // add a validator with the same voting power as the first val2PubKey := ed25519.GenPrivKey().PubKey() - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + fvp, err := encoding.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) @@ -749,7 +752,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := 
statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -769,7 +772,7 @@ func TestLargeGenesisValidator(t *testing.T) { // see: https://github.com/tendermint/tendermint/issues/2960 firstAddedValPubKey := ed25519.GenPrivKey().PubKey() firstAddedValVotingPower := int64(10) - fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey) + fvp, err := encoding.PubKeyToProto(firstAddedValPubKey) require.NoError(t, err) firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) @@ -778,7 +781,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -793,7 +796,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -816,7 +819,7 @@ func TestLargeGenesisValidator(t *testing.T) { // add 10 validators with the same voting power as the one added directly after genesis: for i := 0; i < 10; i++ { addedPubKey := ed25519.GenPrivKey().PubKey() - ap, err := cryptoenc.PubKeyToProto(addedPubKey) + ap, err := encoding.PubKeyToProto(addedPubKey) require.NoError(t, err) addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) @@ -826,7 +829,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -834,14 +837,14 @@ func TestLargeGenesisValidator(t *testing.T) { require.Equal(t, 10+2, len(state.NextValidators.Validators)) // remove genesis validator: - gp, err := cryptoenc.PubKeyToProto(genesisPubKey) + gp, err := encoding.PubKeyToProto(genesisPubKey) require.NoError(t, err) removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} abciResponses = &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: 
&abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -862,7 +865,7 @@ func TestLargeGenesisValidator(t *testing.T) { } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -887,7 +890,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -982,7 +985,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) @@ -1080,7 +1083,7 @@ func TestStateProto(t *testing.T) { assert.NoError(t, err, tt.testName) } - smt, err := sm.StateFromProto(pbs) + smt, err := sm.FromProto(pbs) if tt.expPass2 { require.NoError(t, err, tt.testName) require.Equal(t, tt.state, smt, tt.testName) diff --git a/state/store.go b/internal/state/store.go similarity index 97% rename from state/store.go rename to internal/state/store.go index 5ce11e47d..0f1d2b444 100644 --- a/state/store.go +++ b/internal/state/store.go @@ -11,7 +11,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" - tmos "github.com/tendermint/tendermint/libs/os" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -68,7 +67,7 @@ func init() { //---------------------- -//go:generate ../scripts/mockery_generate.sh Store +//go:generate ../../scripts/mockery_generate.sh Store // Store defines the state store interface // @@ -126,11 +125,10 @@ func (store dbStore) loadState(key []byte) (state State, err error) { err = proto.Unmarshal(buf, sp) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } - sm, err := StateFromProto(sp) + sm, err := FromProto(sp) if err != nil { return state, err } @@ -424,8 +422,7 @@ func (store dbStore) 
LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er err = abciResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. @@ -544,8 +541,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error err = v.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. @@ -632,8 +628,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa paramsInfo := new(tmstate.ConsensusParamsInfo) if err = paramsInfo.Unmarshal(buf); err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf(`data has been corrupted or its spec has changed: %+v`, err)) } // TODO: ensure that buf is completely read. diff --git a/state/store_test.go b/internal/state/store_test.go similarity index 96% rename from state/store_test.go rename to internal/state/store_test.go index 8cce9cc83..0ba1faa33 100644 --- a/state/store_test.go +++ b/internal/state/store_test.go @@ -8,17 +8,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -103,13 +102,15 @@ func TestStoreLoadValidators(t *testing.T) { func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - config := cfg.ResetTestRoot("state_") - defer os.RemoveAll(config.RootDir) - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg, err := config.ResetTestRoot("state_") + require.NoError(b, err) + + defer os.RemoveAll(cfg.RootDir) + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(b, err) stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { b.Fatal(err) } diff --git a/state/test/factory/block.go b/internal/state/test/factory/block.go similarity index 96% rename from state/test/factory/block.go rename to internal/state/test/factory/block.go index b4eb83fa7..dfcf5ebd9 100644 --- a/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -3,8 +3,8 @@ package factory import ( "time" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff 
--git a/state/time.go b/internal/state/time.go similarity index 100% rename from state/time.go rename to internal/state/time.go diff --git a/state/time_test.go b/internal/state/time_test.go similarity index 100% rename from state/time_test.go rename to internal/state/time_test.go diff --git a/state/tx_filter.go b/internal/state/tx_filter.go similarity index 65% rename from state/tx_filter.go rename to internal/state/tx_filter.go index 61340e135..871e08ae6 100644 --- a/state/tx_filter.go +++ b/internal/state/tx_filter.go @@ -1,22 +1,22 @@ package state import ( - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state State) mempl.PreCheckFunc { +func TxPreCheck(state State) mempool.PreCheckFunc { maxDataBytes := types.MaxDataBytesNoEvidence( state.ConsensusParams.Block.MaxBytes, state.Validators.Size(), ) - return mempl.PreCheckMaxBytes(maxDataBytes) + return mempool.PreCheckMaxBytes(maxDataBytes) } // TxPostCheck returns a function to filter transactions after processing. // The function limits the gas wanted by a transaction to the block's maximum total gas. -func TxPostCheck(state State) mempl.PostCheckFunc { - return mempl.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) +func TxPostCheck(state State) mempool.PostCheckFunc { + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) } diff --git a/state/tx_filter_test.go b/internal/state/tx_filter_test.go similarity index 94% rename from state/tx_filter_test.go rename to internal/state/tx_filter_test.go index d6236fcbf..27af28a40 100644 --- a/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + sm "github.com/tendermint/tendermint/internal/state" tmrand "github.com/tendermint/tendermint/libs/rand" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/state/validation.go b/internal/state/validation.go similarity index 100% rename from state/validation.go rename to internal/state/validation.go diff --git a/state/validation_test.go b/internal/state/validation_test.go similarity index 87% rename from state/validation_test.go rename to internal/state/validation_test.go index 151f2be61..65c0648d4 100644 --- a/state/validation_test.go +++ b/internal/state/validation_test.go @@ -8,29 +8,31 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" memmock "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/test/factory" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + statefactory "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" + testfactory "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm 
"github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(3, 1) stateStore := sm.NewStore(stateDB) @@ -90,7 +92,7 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block := sf.MakeBlock(state, height, lastCommit) + block := statefactory.MakeBlock(state, height, lastCommit) tc.malleateBlock(block) err := blockExec.ValidateBlock(state, block) t.Logf("%s: %v", tc.name, err) @@ -107,7 +109,7 @@ func TestValidateBlockHeader(t *testing.T) { } nextHeight := validationTestsStopHeight - block := sf.MakeBlock(state, nextHeight, lastCommit) + block := statefactory.MakeBlock(state, nextHeight, lastCommit) state.InitialHeight = nextHeight + 1 err := blockExec.ValidateBlock(state, block) require.Error(t, err, "expected an error when state is ahead of block") @@ -115,9 +117,11 @@ func TestValidateBlockHeader(t *testing.T) { } func TestValidateBlockCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -141,7 +145,7 @@ func TestValidateBlockCommit(t *testing.T) { #2589: ensure state.LastValidators.VerifyCommit fails here */ // should be height-1 instead of height - wrongHeightVote, err := factory.MakeVote( + wrongHeightVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -158,7 +162,7 @@ func TestValidateBlockCommit(t *testing.T) { state.LastBlockID, []types.CommitSig{wrongHeightVote.CommitSig()}, ) - block := sf.MakeBlock(state, height, wrongHeightCommit) + block := statefactory.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) @@ -166,7 +170,7 @@ func TestValidateBlockCommit(t *testing.T) { /* #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() */ - block = sf.MakeBlock(state, height, wrongSigsCommit) + block = statefactory.MakeBlock(state, height, wrongSigsCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, @@ -195,7 +199,7 @@ func TestValidateBlockCommit(t *testing.T) { /* wrongSigsCommit is fine except for the extra bad precommit */ - goodVote, err := factory.MakeVote( + goodVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -207,7 +211,7 @@ func TestValidateBlockCommit(t *testing.T) { ) require.NoError(t, err, "height %d", height) - bpvPubKey, err := badPrivVal.GetPubKey(context.Background()) + bpvPubKey, err := badPrivVal.GetPubKey(ctx) require.NoError(t, err) 
badVote := &types.Vote{ @@ -223,9 +227,9 @@ func TestValidateBlockCommit(t *testing.T) { g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote(context.Background(), chainID, g) + err = badPrivVal.SignVote(ctx, chainID, g) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(context.Background(), chainID, b) + err = badPrivVal.SignVote(ctx, chainID, b) require.NoError(t, err, "height %d", height) goodVote.Signature, badVote.Signature = g.Signature, b.Signature @@ -236,9 +240,11 @@ func TestValidateBlockCommit(t *testing.T) { } func TestValidateBlockEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(4, 1) stateStore := sm.NewStore(stateDB) @@ -270,7 +276,7 @@ func TestValidateBlockEvidence(t *testing.T) { A block with too much evidence fails */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), @@ -278,7 +284,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) @@ -290,7 +296,7 @@ func TestValidateBlockEvidence(t *testing.T) { A good block with several pieces of good evidence passes */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // precisely the amount of allowed evidence for { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 56ed3c376..80b0ffbd5 100644 --- a/internal/statesync/block_queue.go +++ b/internal/statesync/block_queue.go @@ -200,7 +200,7 @@ func (q *blockQueue) retry(height int64) { // Success is called when a light block has been successfully verified and // processed -func (q *blockQueue) success(height int64) { +func (q *blockQueue) success() { q.mtx.Lock() defer q.mtx.Unlock() if q.terminal != nil && q.verifyHeight == q.terminal.Height { diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index cc4bad134..fc11b824b 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -58,7 +58,7 @@ loop: // assert that the queue serializes the blocks require.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() } } @@ -105,7 +105,7 @@ func TestBlockQueueWithFailures(t *testing.T) { queue.retry(resp.block.Height) } else { trackingHeight-- - queue.success(resp.block.Height) + queue.success() } case <-queue.done(): @@ -223,7 +223,7 @@ func TestBlockQueueStopTime(t *testing.T) { // assert that the queue serializes the blocks assert.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() case <-queue.done(): wg.Wait() @@ -268,7 +268,7 @@ 
loop: case resp := <-queue.verifyNext(): require.GreaterOrEqual(t, resp.block.Height, initialHeight) - queue.success(resp.block.Height) + queue.success() } } } diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 590f128da..2075adae5 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -3,7 +3,6 @@ package statesync import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -42,7 +41,7 @@ type chunkQueue struct { // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { - dir, err := ioutil.TempDir(tempDir, "tm-statesync") + dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } @@ -87,7 +86,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := ioutil.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -229,7 +228,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { return nil, nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } @@ -355,3 +354,16 @@ func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { return ch } + +func (q *chunkQueue) numChunksReturned() int { + q.Lock() + defer q.Unlock() + + cnt := 0 + for _, b := range q.chunkReturned { + if b { + cnt++ + } + } + return cnt +} diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index ad7f19b3b..c3604df9d 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -1,7 +1,6 @@ package statesync import ( - "io/ioutil" "os" "testing" @@ -36,20 +35,20 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := ioutil.TempDir("", "newchunkqueue") + dir, err := os.MkdirTemp("", "newchunkqueue") require.NoError(t, err) defer os.RemoveAll(dir) queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 1) err = queue.Close() require.NoError(t, err) - files, err = ioutil.ReadDir(dir) + files, err = os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 0) } @@ -421,15 +420,7 @@ func TestChunkQueue_Retry(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - // Allocate and add all chunks to the queue - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - _, err = queue.Next() - require.NoError(t, err) - } + allocateAddChunksToQueue(t, queue) // Retrying a couple of chunks makes Next() return them, but they are not allocatable queue.Retry(3) @@ -454,15 +445,7 @@ func TestChunkQueue_RetryAll(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - // Allocate and add all chunks to the queue - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - 
require.NoError(t, err) - _, err = queue.Next() - require.NoError(t, err) - } + allocateAddChunksToQueue(t, queue) _, err := queue.Next() assert.Equal(t, errDone, err) @@ -552,3 +535,29 @@ func TestChunkQueue_WaitFor(t *testing.T) { _, ok = <-w assert.False(t, ok) } + +func TestNumChunkReturned(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + assert.EqualValues(t, 5, queue.Size()) + + allocateAddChunksToQueue(t, queue) + assert.EqualValues(t, 5, queue.numChunksReturned()) + + err := queue.Close() + require.NoError(t, err) +} + +// Allocate and add all chunks to the queue +func allocateAddChunksToQueue(t *testing.T, q *chunkQueue) { + t.Helper() + for i := uint32(0); i < q.Size(); i++ { + _, err := q.Allocate() + require.NoError(t, err) + _, err = q.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = q.Next() + require.NoError(t, err) + } +} diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 37010986f..844cb5e32 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - proto "github.com/tendermint/tendermint/proto/tendermint/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -109,7 +109,7 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh // Respond allows the underlying process which receives requests on the // requestCh to respond with the respective light block. A nil response is used to // represent that the receiver of the request does not have a light block at that height. -func (d *Dispatcher) Respond(lb *proto.LightBlock, peer types.NodeID) error { +func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -297,3 +297,16 @@ func (l *peerList) All() []types.NodeID { defer l.mtx.Unlock() return l.peers } + +func (l *peerList) Contains(id types.NodeID) bool { + l.mtx.Lock() + defer l.mtx.Unlock() + + for _, p := range l.peers { + if id == p { + return true + } + } + + return false +} diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index e5a6a85cd..e717dad12 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -114,6 +114,10 @@ func TestDispatcherProviders(t *testing.T) { func TestPeerListBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() assert.Zero(t, peerList.Len()) numPeers := 10 @@ -199,6 +203,9 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { func TestPeerListConcurrent(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() numPeers := 10 @@ -229,7 +236,6 @@ func TestPeerListConcurrent(t *testing.T) { // we use a context with cancel and a separate go routine to wait for all // the other goroutines to close. 
- ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { diff --git a/internal/statesync/metrics.go b/internal/statesync/metrics.go new file mode 100644 index 000000000..fb134f580 --- /dev/null +++ b/internal/statesync/metrics.go @@ -0,0 +1,91 @@ +package statesync + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this package. + MetricsSubsystem = "statesync" +) + +// Metrics contains metrics exposed by this package. +type Metrics struct { + TotalSnapshots metrics.Counter + ChunkProcessAvgTime metrics.Gauge + SnapshotHeight metrics.Gauge + SnapshotChunk metrics.Counter + SnapshotChunkTotal metrics.Gauge + BackFilledBlocks metrics.Counter + BackFillBlocksTotal metrics.Gauge +} + +// PrometheusMetrics returns Metrics built using the Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_snapshots", + Help: "The total number of snapshots discovered.", + }, labels).With(labelsAndValues...), + ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chunk_process_avg_time", + Help: "The average processing time per chunk.", + }, labels).With(labelsAndValues...), + SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_height", + Help: "The height of the current snapshot that has been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk", + Help: "The current number of chunks that have been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunks_total", + Help: "The total number of chunks in the current snapshot.", + }, labels).With(labelsAndValues...), + BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "backfilled_blocks", + Help: "The current number of blocks that have been back-filled.", + }, labels).With(labelsAndValues...), + BackFillBlocksTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "backfilled_blocks_total", + Help: "The total number of blocks that need to be back-filled.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics { + return &Metrics{ + TotalSnapshots: discard.NewCounter(), + ChunkProcessAvgTime: discard.NewGauge(), + SnapshotHeight: discard.NewGauge(), + SnapshotChunk: discard.NewCounter(), + SnapshotChunkTotal: discard.NewGauge(), + BackFilledBlocks: discard.NewCounter(), + BackFillBlocksTotal: discard.NewGauge(), + } +} diff --git a/internal/statesync/mocks/Metricer.go b/internal/statesync/mocks/Metricer.go new file mode 100644 index 000000000..c4721b304 --- /dev/null +++ b/internal/statesync/mocks/Metricer.go @@ -0,0 +1,112 @@ +// Code generated by mockery 2.9.4. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Metricer is an autogenerated mock type for the Metricer type +type Metricer struct { + mock.Mock +} + +// BackFillBlocksTotal provides a mock function with given fields: +func (_m *Metricer) BackFillBlocksTotal() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// BackFilledBlocks provides a mock function with given fields: +func (_m *Metricer) BackFilledBlocks() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// ChunkProcessAvgTime provides a mock function with given fields: +func (_m *Metricer) ChunkProcessAvgTime() time.Duration { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// SnapshotChunksCount provides a mock function with given fields: +func (_m *Metricer) SnapshotChunksCount() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// SnapshotChunksTotal provides a mock function with given fields: +func (_m *Metricer) SnapshotChunksTotal() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// SnapshotHeight provides a mock function with given fields: +func (_m *Metricer) SnapshotHeight() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// TotalSnapshots provides a mock function with given fields: +func (_m *Metricer) TotalSnapshots() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 538c619fc..b8d681631 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 6c0d26812..6566f823b 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -14,74 +14,20 @@ import ( "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" 
"github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*ssproto.Message)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - SnapshotChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(SnapshotChannel), - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - ChunkChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ChunkChannel), - Priority: 3, - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - LightBlockChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(LightBlockChannel), - Priority: 5, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - ParamsChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ParamsChannel), - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: paramMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - } ) const ( @@ -125,6 +71,57 @@ const ( maxLightBlockRequestRetries = 20 ) +func GetChannelDescriptors() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + + ID: SnapshotChannel, + MessageType: new(ssproto.Message), + Priority: 6, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ChunkChannel, + Priority: 3, + MessageType: new(ssproto.Message), + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: LightBlockChannel, + MessageType: new(ssproto.Message), + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ParamsChannel, + MessageType: new(ssproto.Message), + Priority: 2, + SendQueueCapacity: 10, + RecvMessageCapacity: paramMsgSize, + RecvBufferCapacity: 128, + }, + } + +} + +// Metricer defines an interface used for the rpc sync info query, please see statesync.metrics +// for the details. +type Metricer interface { + TotalSnapshots() int64 + ChunkProcessAvgTime() time.Duration + SnapshotHeight() int64 + SnapshotChunksCount() int64 + SnapshotChunksTotal() int64 + BackFilledBlocks() int64 + BackFillBlocksTotal() int64 +} + // Reactor handles state sync, both restoring snapshots for the local node and // serving snapshots for other nodes. 
type Reactor struct { @@ -158,6 +155,10 @@ type Reactor struct { syncer *syncer providers map[types.NodeID]*BlockProvider stateProvider StateProvider + + metrics *Metrics + backfillBlockTotal int64 + backfilledBlocks int64 } // NewReactor returns a reference to a new state sync reactor, which implements @@ -176,6 +177,7 @@ func NewReactor( stateStore sm.Store, blockStore *store.BlockStore, tempDir string, + ssMetrics *Metrics, ) *Reactor { r := &Reactor{ chainID: chainID, @@ -195,6 +197,7 @@ func NewReactor( peers: newPeerList(), dispatcher: NewDispatcher(blockCh.Out), providers: make(map[types.NodeID]*BlockProvider), + metrics: ssMetrics, } r.BaseService = *service.NewBaseService(logger, "StateSync", r) @@ -207,15 +210,11 @@ func NewReactor( // handle individual envelopes as to not have to deal with bounding workers or pools. // The caller must be sure to execute OnStop to ensure the outbound p2p Channels are // closed. No error is returned. -func (r *Reactor) OnStart() error { - go r.processSnapshotCh() - - go r.processChunkCh() - - go r.processBlockCh() - - go r.processParamsCh() - +func (r *Reactor) OnStart(ctx context.Context) error { + go r.processCh(r.snapshotCh, "snapshot") + go r.processCh(r.chunkCh, "chunk") + go r.processCh(r.blockCh, "light block") + go r.processCh(r.paramsCh, "consensus params") go r.processPeerUpdates() return nil @@ -236,11 +235,11 @@ func (r *Reactor) OnStop() { // Wait for all p2p Channels to be closed before returning. This ensures we // can easily reason about synchronization of all p2p Channels and ensure no // panics will occur. + <-r.peerUpdates.Done() <-r.snapshotCh.Done() <-r.chunkCh.Done() <-r.blockCh.Done() <-r.paramsCh.Done() - <-r.peerUpdates.Done() } // Sync runs a state sync, fetching snapshots and providing chunks to the @@ -251,7 +250,10 @@ func (r *Reactor) OnStop() { func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { // We need at least two peers (for cross-referencing of light blocks) before we can // begin state sync - r.waitForEnoughPeers(ctx, 2) + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return sm.State{}, err + } + r.mtx.Lock() if r.syncer != nil { r.mtx.Unlock() @@ -259,6 +261,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { } if err := r.initStateProvider(ctx, r.chainID, r.initialHeight); err != nil { + r.mtx.Unlock() return sm.State{}, err } @@ -270,7 +273,9 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.stateProvider, r.snapshotCh.Out, r.chunkCh.Out, + r.snapshotCh.Done(), r.tempDir, + r.metrics, ) r.mtx.Unlock() defer func() { @@ -283,10 +288,16 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { requestSnapshotsHook := func() { // request snapshots from all currently connected peers - r.snapshotCh.Out <- p2p.Envelope{ + msg := p2p.Envelope{ Broadcast: true, Message: &ssproto.SnapshotsRequest{}, } + + select { + case <-ctx.Done(): + case <-r.closeCh: + case r.snapshotCh.Out <- msg: + } } state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) @@ -347,6 +358,9 @@ func (r *Reactor) backfill( r.Logger.Info("starting backfill process...", "startHeight", startHeight, "stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID) + r.backfillBlockTotal = startHeight - stopHeight + 1 + r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal)) + const sleepTime = 1 * time.Second var ( lastValidatorSet *types.ValidatorSet @@ -476,11 +490,21 @@ func (r *Reactor) backfill( } trustedBlockID = 
resp.block.LastBlockID - queue.success(resp.block.Height) + queue.success() r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) lastValidatorSet = resp.block.ValidatorSet + r.backfilledBlocks++ + r.metrics.BackFilledBlocks.Add(1) + + // The block height might be less than the stopHeight because the stopTime condition + // hasn't been fulfilled. + if resp.block.Height < stopHeight { + r.backfillBlockTotal++ + r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal)) + } + case <-queue.done(): if err := queue.error(); err != nil { return err @@ -579,7 +603,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { "chunk", msg.Index, "peer", envelope.From, ) - resp, err := r.conn.LoadSnapshotChunkSync(context.Background(), abci.RequestLoadSnapshotChunk{ + resp, err := r.conn.LoadSnapshotChunkSync(context.TODO(), abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, @@ -691,7 +715,7 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { } case *ssproto.LightBlockResponse: - var height int64 = 0 + var height int64 if msg.LightBlock != nil { height = msg.LightBlock.SignedHeader.Header.Height } @@ -736,7 +760,8 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { if sp, ok := r.stateProvider.(*stateProviderP2P); ok { select { case sp.paramsRecvCh <- cp: - default: + case <-time.After(time.Second): + return errors.New("failed to send consensus params, stateprovider not ready for response") } } else { r.Logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) @@ -786,28 +811,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } -// processSnapshotCh initiates a blocking process where we listen for and handle -// envelopes on the SnapshotChannel. -func (r *Reactor) processSnapshotCh() { - r.processCh(r.snapshotCh, "snapshot") -} - -// processChunkCh initiates a blocking process where we listen for and handle -// envelopes on the ChunkChannel. -func (r *Reactor) processChunkCh() { - r.processCh(r.chunkCh, "chunk") -} - -// processBlockCh initiates a blocking process where we listen for and handle -// envelopes on the LightBlockChannel. -func (r *Reactor) processBlockCh() { - r.processCh(r.blockCh, "light block") -} - -func (r *Reactor) processParamsCh() { - r.processCh(r.paramsCh, "consensus params") -} - // processCh routes state sync messages to their respective handlers. Any error // encountered during message execution will result in a PeerError being sent on // the respective channel.
When the reactor is stopped, we will catch the signal @@ -819,8 +822,11 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) { select { case envelope := <-ch.In: if err := r.handleMessage(ch.ID, envelope); err != nil { - r.Logger.Error(fmt.Sprintf("failed to process %s message", chName), - "ch_id", ch.ID, "envelope", envelope, "err", err) + r.Logger.Error("failed to process message", + "err", err, + "channel", chName, + "ch_id", ch.ID, + "envelope", envelope) ch.Error <- p2p.PeerError{ NodeID: envelope.From, Err: err, @@ -828,7 +834,7 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) { } case <-r.closeCh: - r.Logger.Debug(fmt.Sprintf("stopped listening on %s channel; closing...", chName)) + r.Logger.Debug("channel closed", "channel", chName) return } } @@ -847,17 +853,20 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } r.mtx.Lock() + defer r.mtx.Unlock() if r.syncer == nil { - r.mtx.Unlock() return } - defer r.mtx.Unlock() switch peerUpdate.Status { case p2p.PeerStatusUp: newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) r.providers[peerUpdate.NodeID] = newProvider - r.syncer.AddPeer(peerUpdate.NodeID) + err := r.syncer.AddPeer(peerUpdate.NodeID) + if err != nil { + r.Logger.Error("error adding peer to syncer", "error", err) + return + } if sp, ok := r.stateProvider.(*stateProviderP2P); ok { // we do this in a separate routine to not block whilst waiting for the light client to finish // whatever call it's currently executing @@ -891,7 +900,7 @@ func (r *Reactor) processPeerUpdates() { // recentSnapshots fetches the n most recent snapshots from the app func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshotsSync(context.Background(), abci.RequestListSnapshots{}) + resp, err := r.conn.ListSnapshotsSync(context.TODO(), abci.RequestListSnapshots{}) if err != nil { return nil, err } @@ -960,19 +969,35 @@ func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { }, nil } -func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) { - t := time.NewTicker(200 * time.Millisecond) +func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { + startAt := time.Now() + t := time.NewTicker(100 * time.Millisecond) defer t.Stop() - for { + logT := time.NewTicker(time.Minute) + defer logT.Stop() + var iter int + for r.peers.Len() < numPeers { + iter++ select { case <-ctx.Done(): - return + return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]", + time.Since(startAt).Seconds(), r.peers.Len(), numPeers) + case <-r.closeCh: + return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]", + time.Since(startAt).Seconds(), r.peers.Len(), numPeers) case <-t.C: - if r.peers.Len() >= numPeers { - return - } + continue + case <-logT.C: + r.Logger.Info("waiting for sufficient peers to start statesync", + "duration", time.Since(startAt).String(), + "target", numPeers, + "peers", r.peers.Len(), + "iters", iter, + ) + continue } } + return nil } func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initialHeight int64) error { @@ -987,6 +1012,10 @@ func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initial "trustHeight", to.Height, "useP2P", r.cfg.UseP2P) if r.cfg.UseP2P { + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return err + } + peers := r.peers.All() providers := make([]provider.Provider, len(peers)) for idx, p := range peers { @@ -1005,3 +1034,66 @@ func 
(r *Reactor) initStateProvider(ctx context.Context, chainID string, initial } return nil } + +func (r *Reactor) TotalSnapshots() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.snapshots != nil { + return int64(len(r.syncer.snapshots.snapshots)) + } + return 0 +} + +func (r *Reactor) ChunkProcessAvgTime() time.Duration { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil { + return time.Duration(r.syncer.avgChunkTime) + } + return time.Duration(0) +} + +func (r *Reactor) SnapshotHeight() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil { + return r.syncer.lastSyncedSnapshotHeight + } + return 0 +} +func (r *Reactor) SnapshotChunksCount() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.chunks != nil { + return int64(r.syncer.chunks.numChunksReturned()) + } + return 0 +} + +func (r *Reactor) SnapshotChunksTotal() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.processingSnapshot != nil { + return int64(r.syncer.processingSnapshot.Chunks) + } + return 0 +} + +func (r *Reactor) BackFilledBlocks() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.backfilledBlocks +} + +func (r *Reactor) BackFillBlocksTotal() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.backfillBlockTotal +} diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 6906e7c0c..206f2b246 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -16,19 +16,23 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" - smmocks "github.com/tendermint/tendermint/state/mocks" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) +var ( + m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace) +) + type reactorTestSuite struct { reactor *Reactor syncer *syncer @@ -65,6 +69,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, conn *proxymocks.AppConnSnapshot, connQuery *proxymocks.AppConnQuery, @@ -156,6 +161,7 @@ func setup( rts.stateStore, rts.blockStore, "", + m, ) rts.syncer = newSyncer( @@ -166,14 +172,16 @@ func setup( stateProvider, rts.snapshotOutCh, rts.chunkOutCh, + rts.snapshotChannel.Done(), "", + rts.reactor.metrics, ) - require.NoError(t, rts.reactor.Start()) + require.NoError(t, rts.reactor.Start(ctx)) require.True(t, rts.reactor.IsRunning()) t.Cleanup(func() { - require.NoError(t, rts.reactor.Stop()) + rts.reactor.Wait() require.False(t, rts.reactor.IsRunning()) }) @@ -181,8 +189,11 @@ func setup( } func TestReactor_Sync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const snapshotHeight = 7 - rts := 
setup(t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, nil, 2) chain := buildLightBlockChain(t, 1, 10, time.Now()) // app accepts any snapshot rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). @@ -193,7 +204,7 @@ func TestReactor_Sync(t *testing.T) { Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) // app query returns valid state app hash - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + rts.connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: snapshotHeight, LastBlockAppHash: chain[snapshotHeight+1].AppHash, @@ -206,7 +217,7 @@ func TestReactor_Sync(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second) go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ @@ -219,7 +230,7 @@ func TestReactor_Sync(t *testing.T) { go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) // update the config to use the p2p provider rts.reactor.cfg.UseP2P = true @@ -228,12 +239,15 @@ func TestReactor_Sync(t *testing.T) { rts.reactor.cfg.DiscoveryTime = 1 * time.Second // Run state sync - _, err := rts.reactor.Sync(context.Background()) + _, err := rts.reactor.Sync(ctx) require.NoError(t, err) } func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -275,19 +289,23 @@ func TestReactor_ChunkRequest(t *testing.T) { }, } - for name, tc := range testcases { - tc := tc + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunkSync", context.Background(), abci.RequestLoadSnapshotChunk{ + conn.On("LoadSnapshotChunkSync", mock.Anything, abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - rts := setup(t, conn, nil, nil, 2) + rts := setup(ctx, t, conn, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -304,7 +322,10 @@ func TestReactor_ChunkRequest(t *testing.T) { } func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -353,18 +374,23 @@ func TestReactor_SnapshotsRequest(t *testing.T) { }, }, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // mock ABCI connection to return local snapshots conn 
:= &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn.On("ListSnapshotsSync", mock.Anything, abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - rts := setup(t, conn, nil, nil, 100) + rts := setup(ctx, t, conn, nil, nil, 100) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -372,7 +398,7 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } if len(tc.expectResponses) > 0 { - retryUntil(t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) + retryUntil(ctx, t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) } responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses)) @@ -388,7 +414,10 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } func TestReactor_LightBlockResponse(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) var height int64 = 10 h := factory.MakeRandomHeader() @@ -441,7 +470,10 @@ func TestReactor_LightBlockResponse(t *testing.T) { } func TestReactor_BlockProviders(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: types.NodeID("aa"), Status: p2p.PeerStatusUp, @@ -455,7 +487,7 @@ func TestReactor_BlockProviders(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) peers := rts.reactor.peers.All() require.Len(t, peers, 2) @@ -472,7 +504,7 @@ func TestReactor_BlockProviders(t *testing.T) { go func(t *testing.T, p provider.Provider) { defer wg.Done() for height := 2; height < 10; height++ { - lb, err := p.LightBlock(context.Background(), int64(height)) + lb, err := p.LightBlock(ctx, int64(height)) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, height, int(lb.Height)) @@ -480,7 +512,6 @@ func TestReactor_BlockProviders(t *testing.T) { }(t, p) } - ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -494,7 +525,10 @@ func TestReactor_BlockProviders(t *testing.T) { } func TestReactor_StateProviderP2P(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) // make syncer non nil else test won't think we are state syncing rts.reactor.syncer = rts.syncer peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) @@ -512,29 +546,42 @@ func TestReactor_StateProviderP2P(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) rts.reactor.cfg.UseP2P = true rts.reactor.cfg.TrustHeight = 1 rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash()) - ctx := context.Background() + + for _, p := range 
[]types.NodeID{peerA, peerB} { + if !rts.reactor.peers.Contains(p) { + rts.reactor.peers.Append(p) + } + } + require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured") + + ictx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + rts.reactor.mtx.Lock() - err := rts.reactor.initStateProvider(ctx, factory.DefaultTestChainID, 1) + err := rts.reactor.initStateProvider(ictx, factory.DefaultTestChainID, 1) rts.reactor.mtx.Unlock() require.NoError(t, err) rts.reactor.syncer.stateProvider = rts.reactor.stateProvider - appHash, err := rts.reactor.stateProvider.AppHash(ctx, 5) + actx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + appHash, err := rts.reactor.stateProvider.AppHash(actx, 5) require.NoError(t, err) require.Len(t, appHash, 32) - state, err := rts.reactor.stateProvider.State(ctx, 5) + state, err := rts.reactor.stateProvider.State(actx, 5) require.NoError(t, err) require.Equal(t, appHash, state.AppHash) require.Equal(t, types.DefaultConsensusParams(), &state.ConsensusParams) - commit, err := rts.reactor.stateProvider.Commit(ctx, 5) + commit, err := rts.reactor.stateProvider.Commit(actx, 5) require.NoError(t, err) require.Equal(t, commit.BlockID, state.LastBlockID) @@ -546,13 +593,19 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // test backfill algorithm with varying failure rates [0, 10] failureRates := []int{0, 2, 9} for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) - rts := setup(t, nil, nil, nil, 21) + rts := setup(ctx, t, nil, nil, nil, 21) var ( startHeight int64 = 20 @@ -582,11 +635,11 @@ func TestReactor_Backfill(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, failureRate) err := rts.reactor.backfill( - context.Background(), + ctx, factory.DefaultTestChainID, startHeight, stopHeight, @@ -596,6 +649,9 @@ func TestReactor_Backfill(t *testing.T) { ) if failureRate > 3 { require.Error(t, err) + + require.NotEqual(t, rts.reactor.backfilledBlocks, rts.reactor.backfillBlockTotal) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } else { require.NoError(t, err) @@ -606,15 +662,20 @@ func TestReactor_Backfill(t *testing.T) { require.Nil(t, rts.blockStore.LoadBlockMeta(stopHeight-1)) require.Nil(t, rts.blockStore.LoadBlockMeta(startHeight+1)) + + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfilledBlocks) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } + require.Equal(t, rts.reactor.backfilledBlocks, rts.reactor.BackFilledBlocks()) + require.Equal(t, rts.reactor.backfillBlockTotal, rts.reactor.BackFillBlocksTotal()) }) } } // retryUntil will continue to evaluate fn and will return successfully when true // or fail when the timeout is reached. 
-func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) +func retryUntil(ctx context.Context, t *testing.T, fn func() bool, timeout time.Duration) { + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() for { @@ -625,7 +686,9 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { } } -func handleLightBlockRequests(t *testing.T, +func handleLightBlockRequests( + ctx context.Context, + t *testing.T, chain map[int64]*types.LightBlock, receiving chan p2p.Envelope, sending chan p2p.Envelope, @@ -635,6 +698,8 @@ func handleLightBlockRequests(t *testing.T, errorCount := 0 for { select { + case <-ctx.Done(): + return case envelope := <-receiving: if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok { if requests%10 >= failureRate { @@ -678,13 +743,24 @@ func handleLightBlockRequests(t *testing.T, } } -func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) { +func handleConsensusParamsRequest( + ctx context.Context, + t *testing.T, + receiving, sending chan p2p.Envelope, + closeCh chan struct{}, +) { t.Helper() params := types.DefaultConsensusParams() paramsProto := params.ToProto() for { select { + case <-ctx.Done(): + return case envelope := <-receiving: + if ctx.Err() != nil { + return + } + t.Log("received consensus params request") msg, ok := envelope.Message.(*ssproto.ParamsRequest) require.True(t, ok) diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b58cb35de..b622824cd 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -12,6 +12,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" lightprovider "github.com/tendermint/tendermint/light/provider" @@ -20,7 +21,6 @@ import ( lightdb "github.com/tendermint/tendermint/light/store/db" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -337,9 +337,39 @@ func (s *stateProviderP2P) addProvider(p lightprovider.Provider) { } } -// consensusParams sends out a request for consensus params blocking until one is returned. -// If it fails to get a valid set of consensus params from any of the providers it returns an error. +// consensusParams sends out a request for consensus params blocking +// until one is returned. +// +// If it fails to get a valid set of consensus params from any of the +// providers it returns an error; however, it will retry indefinitely +// (with backoff) until the context is canceled. 
 func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (types.ConsensusParams, error) {
+	iterCount := 0
+	for {
+		params, err := s.tryGetConsensusParamsFromWitnesses(ctx, height)
+		if err != nil {
+			return types.ConsensusParams{}, err
+		}
+		if params != nil {
+			return *params, nil
+		}
+		iterCount++
+
+		select {
+		case <-ctx.Done():
+			return types.ConsensusParams{}, ctx.Err()
+		case <-time.After(time.Duration(iterCount) * consensusParamsResponseTimeout):
+		}
+	}
+}
+
+// tryGetConsensusParamsFromWitnesses attempts to get consensus
+// parameters from the light client's available witnesses. If both
+// return values are nil, the caller may retry.
+func (s *stateProviderP2P) tryGetConsensusParamsFromWitnesses(
+	ctx context.Context,
+	height int64,
+) (*types.ConsensusParams, error) {
 	for _, provider := range s.lc.Witnesses() {
 		p, ok := provider.(*BlockProvider)
 		if !ok {
@@ -349,7 +379,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t
 		// extract the nodeID of the provider
 		peer, err := types.NewNodeID(p.String())
 		if err != nil {
-			return types.ConsensusParams{}, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err)
+			return nil, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err)
 		}

 		select {
@@ -360,7 +390,7 @@
 			},
 		}:
 		case <-ctx.Done():
-			return types.ConsensusParams{}, ctx.Err()
+			return nil, ctx.Err()
 		}

 		select {
@@ -368,13 +398,15 @@
 		case <-time.After(consensusParamsResponseTimeout):
 			continue
 		case <-ctx.Done():
-			return types.ConsensusParams{}, ctx.Err()
+			return nil, ctx.Err()
 		case params, ok := <-s.paramsRecvCh:
 			if !ok {
-				return types.ConsensusParams{}, errors.New("params channel closed")
+				return nil, errors.New("params channel closed")
 			}
-			return params, nil
+			return &params, nil
 		}
 	}
-	return types.ConsensusParams{}, errors.New("unable to fetch consensus params from connected providers")
+
+	// signal to caller to retry.
+	return nil, nil
 }
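The retry discipline above is worth seeing in isolation. The following editorial sketch is not part of the patch; fetchOnce and the string payload are hypothetical stand-ins for tryGetConsensusParamsFromWitnesses and types.ConsensusParams. It shows the same shape: a (nil, nil) result means "retry", each empty round widens the wait linearly, and a canceled context is the only other exit.

// Editorial sketch, not part of the patch.
package main

import (
	"context"
	"fmt"
	"time"
)

// fetchOnce mimics tryGetConsensusParamsFromWitnesses: (nil, nil) means
// "nothing fatal, but nothing found either; the caller may retry".
func fetchOnce(attempt int) (*string, error) {
	if attempt < 3 {
		return nil, nil
	}
	v := "consensus params"
	return &v, nil
}

func fetchWithBackoff(ctx context.Context, base time.Duration) (string, error) {
	iterCount := 0
	for {
		params, err := fetchOnce(iterCount)
		if err != nil {
			return "", err // fatal: do not retry
		}
		if params != nil {
			return *params, nil
		}
		iterCount++

		select {
		case <-ctx.Done():
			return "", ctx.Err() // shutdown wins over further retries
		case <-time.After(time.Duration(iterCount) * base): // linear backoff: 1x, 2x, 3x...
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	params, err := fetchWithBackoff(ctx, 10*time.Millisecond)
	fmt.Println(params, err) // succeeds on the fourth attempt, after ~60ms of cumulative waiting
}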
diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go
index 559e98a8f..f266017dd 100644
--- a/internal/statesync/syncer.go
+++ b/internal/statesync/syncer.go
@@ -11,11 +11,11 @@ import (
 	"github.com/tendermint/tendermint/config"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/internal/proxy"
+	sm "github.com/tendermint/tendermint/internal/state"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/light"
 	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )

@@ -63,8 +63,14 @@ type syncer struct {
 	fetchers     int32
 	retryTimeout time.Duration

-	mtx    tmsync.RWMutex
-	chunks *chunkQueue
+	mtx     tmsync.RWMutex
+	chunks  *chunkQueue
+	metrics *Metrics
+
+	avgChunkTime             int64
+	lastSyncedSnapshotHeight int64
+	processingSnapshot       *snapshot
+	closeCh                  <-chan struct{}
 }

 // newSyncer creates a new syncer.
@@ -74,8 +80,11 @@ func newSyncer(
 	conn proxy.AppConnSnapshot,
 	connQuery proxy.AppConnQuery,
 	stateProvider StateProvider,
-	snapshotCh, chunkCh chan<- p2p.Envelope,
+	snapshotCh chan<- p2p.Envelope,
+	chunkCh chan<- p2p.Envelope,
+	closeCh <-chan struct{},
 	tempDir string,
+	metrics *Metrics,
 ) *syncer {
 	return &syncer{
 		logger:       logger,
@@ -88,6 +97,8 @@ func newSyncer(
 		tempDir:      tempDir,
 		fetchers:     cfg.Fetchers,
 		retryTimeout: cfg.ChunkRequestTimeout,
+		metrics:      metrics,
+		closeCh:      closeCh,
 	}
 }

@@ -121,6 +132,7 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err
 		return false, err
 	}
 	if added {
+		s.metrics.TotalSnapshots.Add(1)
 		s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format",
 			snapshot.Format, "hash", snapshot.Hash)
 	}
@@ -129,12 +141,29 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err

 // AddPeer adds a peer to the pool. For now we just keep it simple and send a
 // single request to discover snapshots, later we may want to do retries and stuff.
-func (s *syncer) AddPeer(peerID types.NodeID) {
+func (s *syncer) AddPeer(peerID types.NodeID) (err error) {
+	defer func() {
+		// TODO: remove panic recover once AddPeer can no longer accidentally
+		// send on a closed channel.
+		// This recover was added to protect against the p2p message being sent
+		// to the snapshot channel after the snapshot channel was closed.
+		if r := recover(); r != nil {
+			err = fmt.Errorf("panic sending peer snapshot request: %v", r)
+		}
+	}()
+
 	s.logger.Debug("Requesting snapshots from peer", "peer", peerID)
-	s.snapshotCh <- p2p.Envelope{
+
+	msg := p2p.Envelope{
 		To:      peerID,
 		Message: &ssproto.SnapshotsRequest{},
 	}
+
+	select {
+	case <-s.closeCh:
+	case s.snapshotCh <- msg:
+	}
+	return err
 }

 // RemovePeer removes a peer from the pool.
@@ -190,9 +219,14 @@ func (s *syncer) SyncAny(
 			defer chunks.Close() // in case we forget to close it elsewhere
 		}

+		s.processingSnapshot = snapshot
+		s.metrics.SnapshotChunkTotal.Set(float64(snapshot.Chunks))
+
 		newState, commit, err := s.Sync(ctx, snapshot, chunks)
 		switch {
 		case err == nil:
+			s.metrics.SnapshotHeight.Set(float64(snapshot.Height))
+			s.lastSyncedSnapshotHeight = int64(snapshot.Height)
 			return newState, commit, nil

 		case errors.Is(err, errAbort):
@@ -237,6 +271,7 @@ func (s *syncer) SyncAny(
 		}
 		snapshot = nil
 		chunks = nil
+		s.processingSnapshot = nil
 	}
 }

@@ -286,6 +321,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu
 	// Spawn chunk fetchers. They will terminate when the chunk queue is closed or context canceled.
 	fetchCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
+	fetchStartTime := time.Now()
 	for i := int32(0); i < s.fetchers; i++ {
 		go s.fetchChunks(fetchCtx, snapshot, chunks)
 	}
@@ -324,7 +360,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu
 	}

 	// Restore snapshot
-	err = s.applyChunks(ctx, chunks)
+	err = s.applyChunks(ctx, chunks, fetchStartTime)
 	if err != nil {
 		return sm.State{}, nil, err
 	}
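Before the applyChunks changes below, a note on the shutdown plumbing introduced above: AddPeer here (and requestChunk/fetchChunks further down) now pair every channel send with a case on closeCh, so teardown cannot strand a goroutine blocked on a send that nobody will receive, and the recover in AddPeer converts any residual send-on-closed-channel panic into an error. A minimal, runnable editorial sketch of that guarded-send idiom, with hypothetical names:

// Editorial sketch, not part of the patch.
package main

import "fmt"

type envelope struct{ to string }

// trySend delivers msg unless shutdown has already been signaled via closeCh.
func trySend(out chan<- envelope, closeCh <-chan struct{}, msg envelope) bool {
	select {
	case out <- msg:
		return true
	case <-closeCh:
		return false // shutting down: drop the message instead of blocking forever
	}
}

func main() {
	out := make(chan envelope, 1)
	closeCh := make(chan struct{})

	fmt.Println(trySend(out, closeCh, envelope{to: "peer-a"})) // true: buffer has room

	close(closeCh) // begin shutdown; the buffer is also full now
	fmt.Println(trySend(out, closeCh, envelope{to: "peer-b"})) // false: the shutdown case wins
}

@@ -381,7 +417,7 @@ func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error {

 // applyChunks applies chunks to the app. It returns various errors depending on the app's
 // response, or nil once the snapshot is fully restored.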
-func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { +func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time.Time) error { for { chunk, err := chunks.Next() if err == errDone { @@ -423,6 +459,9 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { switch resp.Result { case abci.ResponseApplySnapshotChunk_ACCEPT: + s.metrics.SnapshotChunk.Add(1) + s.avgChunkTime = time.Since(start).Nanoseconds() / int64(chunks.numChunksReturned()) + s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime)) case abci.ResponseApplySnapshotChunk_ABORT: return errAbort case abci.ResponseApplySnapshotChunk_RETRY: @@ -455,6 +494,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return + case <-s.closeCh: + return case <-time.After(2 * time.Second): continue } @@ -481,6 +522,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return + case <-s.closeCh: + return } ticker.Stop() @@ -504,7 +547,7 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { "peer", peer, ) - s.chunkCh <- p2p.Envelope{ + msg := p2p.Envelope{ To: peer, Message: &ssproto.ChunkRequest{ Height: snapshot.Height, @@ -512,12 +555,17 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { Index: chunk, }, } + + select { + case s.chunkCh <- msg: + case <-s.closeCh: + } } // verifyApp verifies the sync, checking the app hash and last block height. It returns the // app version, which should be returned as part of the initial state. func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { - resp, err := s.connQuery.InfoSync(context.Background(), proxy.RequestInfo) + resp, err := s.connQuery.InfoSync(context.TODO(), proxy.RequestInfo) if err != nil { return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index c1d6b462a..4c240830f 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -13,18 +13,19 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) -var ctx = context.Background() - func TestSyncer_SyncAny(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + state := sm.State{ ChainID: "chain", Version: sm.Version{ @@ -68,19 +69,23 @@ func TestSyncer_SyncAny(t *testing.T) { peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts := setup(ctx, t, connSnapshot, connQuery, stateProvider, 3) + + rts.reactor.syncer = rts.syncer // Adding a chunk should error when no sync is in progress _, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery 
messages - rts.syncer.AddPeer(peerAID) + err = rts.syncer.AddPeer(peerAID) + require.NoError(t, err) e := <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerAID, e.To) - rts.syncer.AddPeer(peerBID) + err = rts.syncer.AddPeer(peerBID) + require.NoError(t, err) e = <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerBID, e.To) @@ -106,7 +111,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -115,7 +120,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -156,7 +161,7 @@ func TestSyncer_SyncAny(t *testing.T) { // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ @@ -164,16 +169,16 @@ func TestSyncer_SyncAny(t *testing.T) { RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), @@ -195,6 +200,16 @@ func TestSyncer_SyncAny(t *testing.T) { require.Equal(t, expectState, newState) require.Equal(t, commit, lastCommit) + require.Equal(t, len(chunks), int(rts.syncer.processingSnapshot.Chunks)) + require.Equal(t, expectState.LastBlockHeight, rts.syncer.lastSyncedSnapshotHeight) + require.True(t, rts.syncer.avgChunkTime > 0) + + require.Equal(t, 
int64(rts.syncer.processingSnapshot.Chunks), rts.reactor.SnapshotChunksTotal()) + require.Equal(t, rts.syncer.lastSyncedSnapshotHeight, rts.reactor.SnapshotHeight()) + require.Equal(t, time.Duration(rts.syncer.avgChunkTime), rts.reactor.ChunkProcessAvgTime()) + require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) + require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) + connSnapshot.AssertExpectations(t) connQuery.AssertExpectations(t) } @@ -203,7 +218,10 @@ func TestSyncer_SyncAny_noSnapshots(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) _, _, err := rts.syncer.SyncAny(ctx, 0, func() {}) require.Equal(t, errNoSnapshots, err) @@ -213,7 +231,10 @@ func TestSyncer_SyncAny_abort(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") @@ -221,7 +242,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) @@ -234,7 +255,10 @@ func TestSyncer_SyncAny_reject(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -252,15 +276,15 @@ func TestSyncer_SyncAny_reject(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) @@ -273,7 +297,10 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, 
mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -291,11 +318,11 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) @@ -308,7 +335,10 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -337,11 +367,11 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, sbc) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) @@ -354,7 +384,10 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -364,7 +397,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) @@ -391,16 +424,23 @@ func TestSyncer_offerSnapshot(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, 
mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) @@ -437,31 +477,41 @@ func TestSyncer_applyChunks_Results(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } - err = rts.syncer.applyChunks(ctx, chunks) + err = rts.syncer.applyChunks(ctx, chunks, fetchStartTime) if tc.expectErr == unknownErr { require.Error(t, err) } else { @@ -488,16 +538,25 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}}) require.True(t, added) require.NoError(t, err) @@ -509,13 +568,13 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - 
rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -526,7 +585,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { // check the queue contents, and finally close the queue to end the goroutine. // We don't really care about the result of applyChunks, since it has separate test. go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) @@ -550,13 +609,19 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. @@ -588,6 +653,8 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { chunks, err := newChunkQueue(s1, "") require.NoError(t, err) + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID}) require.True(t, added) require.NoError(t, err) @@ -601,13 +668,13 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, Sender: "aa", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, Sender: "bb", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -616,7 +683,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // On retry, the last chunk will be tried again, so we just accept it then. 
if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } @@ -625,7 +692,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // However, it will block on e.g. retry result, so we spawn a goroutine that will // be shut down when the chunk queue closes. go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) @@ -671,12 +738,18 @@ func TestSyncer_verifyApp(t *testing.T) { }, nil, errVerifyFailed}, "error": {nil, boom, boom}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(tc.response, tc.err) + rts := setup(ctx, t, nil, nil, nil, 2) + + rts.connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) version, err := rts.syncer.verifyApp(s) unwrapped := errors.Unwrap(err) if unwrapped != nil { diff --git a/store/store.go b/internal/store/store.go similarity index 99% rename from store/store.go rename to internal/store/store.go index 8848b76d9..c978241ff 100644 --- a/store/store.go +++ b/internal/store/store.go @@ -345,7 +345,7 @@ func (bs *BlockStore) pruneRange( var ( err error pruned uint64 - totalPruned uint64 = 0 + totalPruned uint64 ) batch := bs.db.NewBatch() @@ -392,7 +392,7 @@ func (bs *BlockStore) batchDelete( start, end []byte, preDeletionHook func(key, value []byte, batch dbm.Batch) error, ) (uint64, []byte, error) { - var pruned uint64 = 0 + var pruned uint64 iter, err := bs.db.Iterator(start, end) if err != nil { return pruned, start, err diff --git a/store/store_test.go b/internal/store/store_test.go similarity index 95% rename from store/store_test.go rename to internal/store/store_test.go index 2132d9aff..14163f488 100644 --- a/store/store_test.go +++ b/internal/store/store_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -46,13 +46,17 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { } func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { - config := cfg.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + if err != nil { + panic(err) + } + blockDB := dbm.NewMemDB() - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := 
sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } - return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } + return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) } } func freshBlockStore() (*BlockStore, dbm.DB) { @@ -292,9 +296,11 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) @@ -348,9 +354,11 @@ func TestLoadBlockPart(t *testing.T) { } func TestPruneBlocks(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() bs := NewBlockStore(db) diff --git a/internal/test/factory/genesis.go b/internal/test/factory/genesis.go index c49f9fce8..c36c9948a 100644 --- a/internal/test/factory/genesis.go +++ b/internal/test/factory/genesis.go @@ -3,12 +3,12 @@ package factory import ( "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" ) func GenesisDoc( - config *cfg.Config, + cfg *config.Config, time time.Time, validators []*types.Validator, consensusParams *types.ConsensusParams, @@ -26,7 +26,7 @@ func GenesisDoc( return &types.GenesisDoc{ GenesisTime: time, InitialHeight: 1, - ChainID: config.ChainID(), + ChainID: cfg.ChainID(), Validators: genesisValidators, ConsensusParams: consensusParams, } diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index b78fafddd..a0258521c 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -30,9 +30,21 @@ func NewBitArray(bits int) *BitArray { if bits <= 0 { return nil } - return &BitArray{ - Bits: bits, - Elems: make([]uint64, numElems(bits)), + bA := &BitArray{} + bA.reset(bits) + return bA +} + +// reset changes size of BitArray to `bits` and re-allocates (zeroed) data buffer +func (bA *BitArray) reset(bits int) { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bA.Bits = bits + if bits == 0 { + bA.Elems = nil + } else { + bA.Elems = make([]uint64, numElems(bits)) } } @@ -399,8 +411,7 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { if b == "null" { // This is required e.g. for encoding/json when decoding // into a pointer with pre-allocated BitArray. - bA.Bits = 0 - bA.Elems = nil + bA.reset(0) return nil } @@ -410,16 +421,15 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) } bits := match[1] - - // Construct new BitArray and copy over. 
numBits := len(bits) - bA2 := NewBitArray(numBits) + + bA.reset(numBits) for i := 0; i < numBits; i++ { if bits[i] == 'x' { - bA2.SetIndex(i, true) + bA.SetIndex(i, true) } } - *bA = *bA2 //nolint:govet + return nil } diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 4b87bd60b..37fe34fc9 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -19,7 +18,7 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } // RunWithArgs executes the given command with the specified command line args diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 0cb322344..fec49e5c1 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -2,7 +2,7 @@ package cli import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" "testing" @@ -55,7 +55,7 @@ func TestSetupEnv(t *testing.T) { } func tempDir() string { - cdir, err := ioutil.TempDir("", "test-cli") + cdir, err := os.MkdirTemp("", "test-cli") if err != nil { panic(err) } diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go deleted file mode 100644 index 539870363..000000000 --- a/libs/cmap/cmap.go +++ /dev/null @@ -1,77 +0,0 @@ -package cmap - -import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// CMap is a goroutine-safe map -type CMap struct { - m map[string]interface{} - l tmsync.Mutex -} - -func NewCMap() *CMap { - return &CMap{ - m: make(map[string]interface{}), - } -} - -func (cm *CMap) Set(key string, value interface{}) { - cm.l.Lock() - cm.m[key] = value - cm.l.Unlock() -} - -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() - val := cm.m[key] - cm.l.Unlock() - return val -} - -func (cm *CMap) Has(key string) bool { - cm.l.Lock() - _, ok := cm.m[key] - cm.l.Unlock() - return ok -} - -func (cm *CMap) Delete(key string) { - cm.l.Lock() - delete(cm.m, key) - cm.l.Unlock() -} - -func (cm *CMap) Size() int { - cm.l.Lock() - size := len(cm.m) - cm.l.Unlock() - return size -} - -func (cm *CMap) Clear() { - cm.l.Lock() - cm.m = make(map[string]interface{}) - cm.l.Unlock() -} - -func (cm *CMap) Keys() []string { - cm.l.Lock() - - keys := make([]string, 0, len(cm.m)) - for k := range cm.m { - keys = append(keys, k) - } - cm.l.Unlock() - return keys -} - -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - items := make([]interface{}, 0, len(cm.m)) - for _, v := range cm.m { - items = append(items, v) - } - cm.l.Unlock() - return items -} diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go deleted file mode 100644 index bab78da96..000000000 --- a/libs/cmap/cmap_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package cmap - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIterateKeysWithValues(t *testing.T) { - cmap := NewCMap() - - for i := 1; i <= 10; i++ { - cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) - } - - // Testing size - assert.Equal(t, 10, cmap.Size()) - assert.Equal(t, 10, len(cmap.Keys())) - assert.Equal(t, 10, len(cmap.Values())) - - // Iterating Keys, checking for matching Value - for _, key := range cmap.Keys() { - val := strings.ReplaceAll(key, "key", "value") - assert.Equal(t, val, cmap.Get(key)) - } - - // Test if all keys are within []Keys() - keys := cmap.Keys() - for i := 1; i <= 10; i++ { - assert.Contains(t, keys, 
fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") - } - - // Delete 1 Key - cmap.Delete("key1") - - assert.NotEqual( - t, - len(keys), - len(cmap.Keys()), - "[]keys and []Keys() should not be equal, they are copies, one item was removed", - ) -} - -func TestContains(t *testing.T) { - cmap := NewCMap() - - cmap.Set("key1", "value1") - - // Test for known values - assert.True(t, cmap.Has("key1")) - assert.Equal(t, "value1", cmap.Get("key1")) - - // Test for unknown values - assert.False(t, cmap.Has("key2")) - assert.Nil(t, cmap.Get("key2")) -} - -func BenchmarkCMapHas(b *testing.B) { - m := NewCMap() - for i := 0; i < 1000; i++ { - m.Set(string(rune(i)), i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Has(string(rune(i))) - } -} diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index d6199bc80..a5bb975c9 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -1,6 +1,7 @@ package events import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -8,8 +9,11 @@ import ( ) func TestEventCache_Flush(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() + err := evsw.Start(ctx) require.NoError(t, err) err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { diff --git a/libs/events/events.go b/libs/events/events.go index 146a9cfa7..f6151e734 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -2,6 +2,7 @@ package events import ( + "context" "fmt" tmsync "github.com/tendermint/tendermint/internal/libs/sync" @@ -45,6 +46,7 @@ type Fireable interface { type EventSwitch interface { service.Service Fireable + Stop() error AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error RemoveListenerForEvent(event string, listenerID string) @@ -68,7 +70,7 @@ func NewEventSwitch() EventSwitch { return evsw } -func (evsw *eventSwitch) OnStart() error { +func (evsw *eventSwitch) OnStart(ctx context.Context) error { return nil } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 9e21e0235..0e8667908 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -1,6 +1,7 @@ package events import ( + "context" "fmt" "testing" "time" @@ -14,23 +15,20 @@ import ( // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single // listener to an event, and sends a string "data". func TestAddListenerForEventFireOnce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) messages := make(chan EventData) - err = evsw.AddListenerForEvent("listener", "event", + require.NoError(t, evsw.AddListenerForEvent("listener", "event", func(data EventData) { // test there's no deadlock if we remove the listener inside a callback evsw.RemoveListener("listener") messages <- data - }) - require.NoError(t, err) + })) go evsw.FireEvent("event", "data") received := <-messages if received != "data" { @@ -41,24 +39,21 @@ func TestAddListenerForEventFireOnce(t *testing.T) { // TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single // listener to an event, and sends a thousand integers. 
func TestAddListenerForEventFireMany(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener for one event - err = evsw.AddListenerForEvent("listener", "event", + require.NoError(t, evsw.AddListenerForEvent("listener", "event", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -75,14 +70,12 @@ func TestAddListenerForEventFireMany(t *testing.T) { // listener to three different events and sends a thousand integers for each // of the three events. func TestAddListenerForDifferentEvents(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending1 := make(chan uint64) @@ -90,21 +83,18 @@ func TestAddListenerForDifferentEvents(t *testing.T) { doneSending3 := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener to three events - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event3", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -127,15 +117,13 @@ func TestAddListenerForDifferentEvents(t *testing.T) { // listener to two of those three events, and then sends a thousand integers // for each of the three events. 
func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + evsw := NewEventSwitch() + require.NoError(t, evsw.Start(ctx)) + + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -145,31 +133,26 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 @@ -199,14 +182,12 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { roundCount = 2000 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) done1 := make(chan struct{}) done2 := make(chan struct{}) @@ -249,14 +230,12 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // two events, fires a thousand integers for the first event, then unsubscribes // the listener and fires a thousand integers for the second event. 
func TestAddAndRemoveListener(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -265,16 +244,14 @@ func TestAddAndRemoveListener(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 @@ -300,29 +277,23 @@ func TestAddAndRemoveListener(t *testing.T) { // TestRemoveListener does basic tests on adding and removing func TestRemoveListener(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { sum1++ - }) - require.NoError(t, err) - - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { sum2++ - }) - require.NoError(t, err) + })) for i := 0; i < count; i++ { evsw.FireEvent("event1", true) @@ -361,14 +332,11 @@ func TestRemoveListener(t *testing.T) { // NOTE: it is important to run this test with race conditions tracking on, // `go test -race`, to examine for possible race conditions. 
func TestRemoveListenersAsync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -378,36 +346,30 @@ func TestRemoveListenersAsync(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event1", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event1", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index a87bc51f1..ccb3c0038 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -61,7 +61,6 @@ func (c CustomValue) MarshalJSON() ([]byte, error) { } func (c CustomValue) UnmarshalJSON(bz []byte) error { - c.Value = "custom" return nil } diff --git a/libs/os/os.go b/libs/os/os.go index f4b0f1810..02b98c52a 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -29,11 +29,6 @@ func TrapSignal(logger logger, cb func()) { }() } -func Exit(s string) { - fmt.Printf(s + "\n") - os.Exit(1) -} - // EnsureDir ensures the given directory exists, creating it if necessary. // Errors if the path already exists as a non-directory. 
func EnsureDir(dir string, mode os.FileMode) error { diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 3a31de04a..22d739ad7 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -3,7 +3,6 @@ package os_test import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -16,7 +15,7 @@ import ( ) func TestCopyFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") if err != nil { t.Fatal(err) } @@ -33,7 +32,7 @@ func TestCopyFile(t *testing.T) { if _, err := os.Stat(copyfile); os.IsNotExist(err) { t.Fatal("copy should exist") } - data, err := ioutil.ReadFile(copyfile) + data, err := os.ReadFile(copyfile) if err != nil { t.Fatal(err) } @@ -70,7 +69,7 @@ func TestTrapSignal(t *testing.T) { } func TestEnsureDir(t *testing.T) { - tmp, err := ioutil.TempDir("", "ensure-dir") + tmp, err := os.MkdirTemp("", "ensure-dir") require.NoError(t, err) defer os.RemoveAll(tmp) @@ -84,7 +83,7 @@ func TestEnsureDir(t *testing.T) { require.NoError(t, err) // Should fail on file. - err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) require.NoError(t, err) err = tmos.EnsureDir(filepath.Join(tmp, "file"), 0755) require.Error(t, err) @@ -140,7 +139,7 @@ func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *byt // the origin is positively a non-directory and that it is ready for copying. // See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate") + tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") if err != nil { t.Fatal(err) } @@ -148,12 +147,12 @@ func TestTrickedTruncation(t *testing.T) { originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { t.Fatal(err) } // 1. Sanity check. - readWAL, err := ioutil.ReadFile(originalWALPath) + readWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } @@ -168,7 +167,7 @@ func TestTrickedTruncation(t *testing.T) { } // 3. 
Check the WAL's content - reReadWAL, err := ioutil.ReadFile(originalWALPath) + reReadWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index fd4a94382..4d317215f 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -7,27 +7,19 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" ) func TestExample(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := newTestServer(ctx, t) - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - - subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'")) - require.NoError(t, err) + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "example-client", + Query: query.MustParse("abci.account.name='John'"), + })) events := []abci.Event{ { @@ -35,8 +27,6 @@ func TestExample(t *testing.T) { Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, }, } - err = s.PublishWithEvents(ctx, "Tombstone", events) - require.NoError(t, err) - - assertReceive(t, "Tombstone", subscription.Out()) + require.NoError(t, s.PublishWithEvents(ctx, "Tombstone", events)) + sub.mustReceive(ctx, "Tombstone") } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 68d1ec941..930dd47bc 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -1,35 +1,35 @@ -// Package pubsub implements a pub-sub model with a single publisher (Server) -// and multiple subscribers (clients). +// Package pubsub implements an event dispatching server with a single publisher +// and multiple subscriber clients. Multiple goroutines can safely publish to a +// single Server instance. // -// Though you can have multiple publishers by sharing a pointer to a server or -// by giving the same channel to each publisher and publishing messages from -// that channel (fan-in). -// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. +// Clients register subscriptions with a query to select which messages they +// wish to receive. When messages are published, they are broadcast to all +// clients whose subscription query matches that message. Queries are +// constructed using the github.com/tendermint/tendermint/libs/pubsub/query +// package. 
// // Example: // -// q, err := query.New("account.name='John'") +// q, err := query.New(`account.name='John'`) // if err != nil { // return err // } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) +// sub, err := pubsub.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ +// ClientID: "johns-transactions", +// Query: q, +// }) // if err != nil { // return err // } // // for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Canceled(): -// return subscription.Err() +// next, err := sub.Next(ctx) +// if err == pubsub.ErrTerminated { +// return err // terminated by publisher +// } else if err != nil { +// return err // timed out, client unsubscribed, etc. // } +// process(next) // } // package pubsub @@ -38,22 +38,12 @@ import ( "context" "errors" "fmt" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/service" ) -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - var ( // ErrSubscriptionNotFound is returned when a client tries to unsubscribe // from not existing subscription. @@ -62,6 +52,10 @@ var ( // ErrAlreadySubscribed is returned when a client tries to subscribe twice or // more using the same query. ErrAlreadySubscribed = errors.New("already subscribed") + + // ErrServerStopped is returned when attempting to publish or subscribe to a + // server that has been stopped. + ErrServerStopped = errors.New("pubsub server is stopped") ) // Query defines an interface for a query to be used for subscribing. A query @@ -75,17 +69,29 @@ type Query interface { String() string } -type UnsubscribeArgs struct { - ID string - Subscriber string - Query Query +// SubscribeArgs are the parameters to create a new subscription. +type SubscribeArgs struct { + ClientID string // Client ID + Query Query // filter query for events (required) + Limit int // subscription queue capacity limit (0 means 1) + Quota int // subscription queue soft quota (0 uses Limit) } +// UnsubscribeArgs are the parameters to remove a subscription. +// The subscriber ID must be populated, and at least one of the client ID or +// the registered query. +type UnsubscribeArgs struct { + Subscriber string // subscriber ID chosen by the client (required) + ID string // subscription ID (assigned by the server) + Query Query // the query registered with the subscription +} + +// Validate returns nil if args are valid to identify a subscription to remove. +// Otherwise, it reports an error. func (args UnsubscribeArgs) Validate() error { if args.Subscriber == "" { return errors.New("must specify a subscriber") } - if args.ID == "" && args.Query == nil { return fmt.Errorf("subscription is not fully defined [subscriber=%q]", args.Subscriber) } @@ -93,35 +99,33 @@ func (args UnsubscribeArgs) Validate() error { return nil } -type cmd struct { - op operation - - // subscribe, unsubscribe - query Query - subscription *Subscription - clientID string - - // publish - msg interface{} - events []types.Event -} - // Server allows clients to subscribe/unsubscribe for messages, publishing // messages with or without events, and manages internal state. 
type Server struct { service.BaseService - cmds chan cmd - cmdsCap int + queue chan item + done <-chan struct{} // closed when server should exit + stop func() // signal the server to exit + pubs sync.RWMutex // excl: shutdown; shared: active publisher + exited chan struct{} // server exited - // check if we have subscription before - // subscribing or unsubscribing - mtx tmsync.RWMutex + // All subscriptions currently known. + // Lock exclusive to add, remove, or cancel subscriptions. + // Lock shared to look up or publish to subscriptions. + subs struct { + sync.RWMutex + index *subIndex - // subscriber -> [query->id (string) OR id->query (string))], - // track connections both by ID (new) and query (legacy) to - // avoid breaking the interface. - subscriptions map[string]map[string]string + // This function is called synchronously with each message published + // before it is delivered to any other subscriber. This allows an index + // to be persisted before any subscribers see the messages. + observe func(Message) error + } + + // TODO(creachadair): Rework the options so that this does not need to live + // as a field. It is not otherwise needed. + queueCap int } // Option sets a parameter for the server. @@ -131,395 +135,328 @@ type Option func(*Server) // for a detailed description of how to configure buffering. If no options are // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]string), - } + s := new(Server) s.BaseService = *service.NewBaseService(nil, "PubSub", s) - - for _, option := range options { - option(s) + for _, opt := range options { + opt(s) } - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) + // The queue receives items to be published. + s.queue = make(chan item, s.queueCap) + + // The index tracks subscriptions by ID and query terms. + s.subs.index = newSubIndex() return s } -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). +// BufferCapacity allows you to specify capacity for publisher's queue. This +// is the number of messages that can be published without blocking. If no +// buffer is specified, publishing is synchronous with delivery. This function +// will panic if cap < 0. func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap + if cap < 0 { + panic("negative buffer capacity") + } + return func(s *Server) { s.queueCap = cap } +} + +// BufferCapacity returns capacity of the publication queue. +func (s *Server) BufferCapacity() int { return cap(s.queue) } + +// Subscribe creates a subscription for the given client ID and query. +// If len(capacities) > 0, its first value is used as the queue capacity. +// +// Deprecated: Use SubscribeWithArgs. This method will be removed in v0.36. 
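+//
+// New code should call SubscribeWithArgs directly; as a sketch, the legacy
+// capacities map onto the new arguments like so (clientID and q stand in for
+// the caller's values):
+//
+//	sub, err := s.SubscribeWithArgs(ctx, SubscribeArgs{
+//		ClientID: clientID,
+//		Query:    q,
+//		Limit:    8, // capacities[0]; 0 means 1
+//		Quota:    4, // capacities[1]; 0 uses Limit
+//	})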
+func (s *Server) Subscribe(ctx context.Context, + clientID string, query Query, capacities ...int) (*Subscription, error) { + + args := SubscribeArgs{ + ClientID: clientID, + Query: query, + Limit: 1, + } + if len(capacities) > 0 { + args.Limit = capacities[0] + if len(capacities) > 1 { + args.Quota = capacities[1] + } + // bounds are checked below + } + return s.SubscribeWithArgs(ctx, args) +} + +// Observe registers an observer function that will be called synchronously +// with each published message matching any of the given queries, prior to it +// being forwarded to any subscriber. If no queries are specified, all +// messages will be observed. An error is reported if an observer is already +// registered. +func (s *Server) Observe(ctx context.Context, observe func(Message) error, queries ...Query) error { + s.subs.Lock() + defer s.subs.Unlock() + if observe == nil { + return errors.New("observe callback is nil") + } else if s.subs.observe != nil { + return errors.New("an observer is already registered") + } + + // Compile the message filter. + var matches func(Message) bool + if len(queries) == 0 { + matches = func(Message) bool { return true } + } else { + matches = func(msg Message) bool { + for _, q := range queries { + match, err := q.Matches(msg.events) + if err == nil && match { + return true + } + } + return false } } -} -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} - -// Subscribe creates a subscription for the given client. -// -// An error will be returned to the caller if the context is canceled or if -// subscription already exist for pair clientID and query. -// -// outCapacity can be used to set a capacity for Subscription#Out channel (1 by -// default). Panics if outCapacity is less than or equal to zero. If you want -// an unbuffered channel, use SubscribeUnbuffered. -func (s *Server) Subscribe( - ctx context.Context, - clientID string, - query Query, - outCapacity ...int) (*Subscription, error) { - outCap := 1 - if len(outCapacity) > 0 { - if outCapacity[0] <= 0 { - panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel") + s.subs.observe = func(msg Message) error { + if matches(msg) { + return observe(msg) } - outCap = outCapacity[0] + return nil // nothing to do for this message } - - return s.subscribe(ctx, clientID, query, outCap) + return nil } -// SubscribeUnbuffered does the same as Subscribe, except it returns a -// subscription with unbuffered channel. Use with caution as it can freeze the -// server. -func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) { - return s.subscribe(ctx, clientID, query, 0) -} - -func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] +// SubscribeWithArgs creates a subscription for the given arguments. It is an +// error if the query is nil, a subscription already exists for the specified +// client ID and query, or if the capacity arguments are invalid. 
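+//
+// A bounded receive can be expressed with a context deadline. A sketch,
+// assuming a subscription sub returned by this method and a caller-defined
+// process function:
+//
+//	tctx, cancel := context.WithTimeout(ctx, time.Second)
+//	defer cancel()
+//	msg, err := sub.Next(tctx)
+//	if err != nil {
+//		return err // deadline exceeded, unsubscribed, or terminated
+//	}
+//	process(msg)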
+func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) { + if args.Query == nil { + return nil, errors.New("query is nil") } - s.mtx.RUnlock() - if ok { + s.subs.Lock() + defer s.subs.Unlock() + + if s.subs.index == nil { + return nil, ErrServerStopped + } else if s.subs.index.contains(args.ClientID, args.Query.String()) { return nil, ErrAlreadySubscribed } - subscription := NewSubscription(outCapacity) - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]string) - } - s.subscriptions[clientID][query.String()] = subscription.id - s.subscriptions[clientID][subscription.id] = query.String() - s.mtx.Unlock() - return subscription, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.Quit(): - return nil, nil + if args.Limit == 0 { + args.Limit = 1 } + sub, err := newSubscription(args.Quota, args.Limit) + if err != nil { + return nil, err + } + s.subs.index.add(&subInfo{ + clientID: args.ClientID, + query: args.Query, + subID: sub.id, + sub: sub, + }) + return sub, nil } -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. +// Unsubscribe removes the subscription for the given client and/or query. It +// returns ErrSubscriptionNotFound if no such subscription exists. func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if err := args.Validate(); err != nil { return err } - var qs string - - if args.Query != nil { - qs = args.Query.String() + s.subs.Lock() + defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped } - clientSubscriptions, err := func() (map[string]string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() + // TODO(creachadair): Do we need to support unsubscription for an "empty" + // query? I believe that case is not possible by the Query grammar, but we + // should make sure. + // + // Revisit this logic once we are able to remove indexing by query. - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] - - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return nil, err - } - } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] + var evict subInfoSet + if args.Subscriber != "" { + evict = s.subs.index.findClientID(args.Subscriber) + if args.Query != nil { + evict = evict.withQuery(args.Query.String()) } - - if !ok { - return nil, ErrSubscriptionNotFound - } - - return clientSubscriptions, nil - }() - - if err != nil { - return err + } else { + evict = s.subs.index.findQuery(args.Query.String()) } - select { - case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(clientSubscriptions, args.ID) - delete(clientSubscriptions, qs) - - if len(clientSubscriptions) == 0 { - delete(s.subscriptions, args.Subscriber) - } - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. 
-func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { + if len(evict) == 0 { return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil +} - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - defer s.mtx.Unlock() +// UnsubscribeAll removes all subscriptions for the given client ID. +// It returns ErrSubscriptionNotFound if no subscriptions exist for that client. +func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.subs.Lock() + defer s.subs.Unlock() - delete(s.subscriptions, clientID) - - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil + evict := s.subs.index.findClientID(clientID) + if len(evict) == 0 { + return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil } // NumClients returns the number of clients. func (s *Server) NumClients() int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions) + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.byClient) } // NumClientSubscriptions returns the number of subscriptions the client has. func (s *Server) NumClientSubscriptions(clientID string) int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions[clientID]) / 2 + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.findClientID(clientID)) } // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) + return s.publish(ctx, msg, []types.Event{}) } // PublishWithEvents publishes the given message with the set of events. The set // is matched with clients queries. If there is a match, the message is sent to // the client. func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, events: events}: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } + return s.publish(ctx, msg, events) } // OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} +func (s *Server) OnStop() { s.stop() } -// NOTE: not goroutine safe -type state struct { - // query string -> client -> subscription - subscriptions map[string]map[string]*Subscription - // query string -> queryPlusRefCount - queries map[string]*queryPlusRefCount -} - -// queryPlusRefCount holds a pointer to a query and reference counter. When -// refCount is zero, query will be removed. -type queryPlusRefCount struct { - q Query - refCount int -} +// Wait implements Service.Wait by blocking until the server has exited, then +// yielding to the base service wait. +func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() } // OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { - go s.loop(state{ - subscriptions: make(map[string]map[string]*Subscription), - queries: make(map[string]*queryPlusRefCount), - }) - return nil +func (s *Server) OnStart(ctx context.Context) error { s.run(); return nil } + +// OnReset implements Service.OnReset. It has no effect for this service. 
+func (s *Server) OnReset() error { return nil } + +func (s *Server) publish(ctx context.Context, data interface{}, events []types.Event) error { + s.pubs.RLock() + defer s.pubs.RUnlock() + + select { + case <-s.done: + return ErrServerStopped + case <-ctx.Done(): + return ctx.Err() + case s.queue <- item{ + Data: data, + Events: events, + }: + return nil + } } -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} +func (s *Server) run() { + // The server runs until ctx is canceled. + ctx, cancel := context.WithCancel(context.Background()) + s.done = ctx.Done() + s.stop = cancel -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query.String(), cmd.subscription.id, ErrUnsubscribed) - } else { - state.removeClient(cmd.clientID, ErrUnsubscribed) - } - case shutdown: - state.removeAll(nil) - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.subscription) - case pub: - if err := state.send(cmd.msg, cmd.events); err != nil { - s.Logger.Error("Error querying for events", "err", err) + // Shutdown monitor: When the context ends, wait for any active publish + // calls to exit, then close the queue to signal the sender to exit. + go func() { + <-ctx.Done() + s.pubs.Lock() + defer s.pubs.Unlock() + close(s.queue) + }() + + s.exited = make(chan struct{}) + go func() { + defer close(s.exited) + + // Sender: Service the queue and forward messages to subscribers. + for it := range s.queue { + if err := s.send(it.Data, it.Events); err != nil { + s.Logger.Error("Error sending event", "err", err) } } - } + // Terminate all subscribers before exit. + s.subs.Lock() + defer s.subs.Unlock() + for si := range s.subs.index.all { + si.sub.stop(ErrTerminated) + } + s.subs.index = nil + }() } -func (state *state) add(clientID string, q Query, subscription *Subscription) { - qStr := q.String() - - // initialize subscription for this client per query if needed - if _, ok := state.subscriptions[qStr]; !ok { - state.subscriptions[qStr] = make(map[string]*Subscription) +// removeSubs cancels and removes all the subscriptions in evict with the given +// error. The caller must hold the s.subs lock. +func (s *Server) removeSubs(evict subInfoSet, reason error) { + for si := range evict { + si.sub.stop(reason) } - - if _, ok := state.subscriptions[subscription.id]; !ok { - state.subscriptions[subscription.id] = make(map[string]*Subscription) - } - - // create subscription - state.subscriptions[qStr][clientID] = subscription - state.subscriptions[subscription.id][clientID] = subscription - - // initialize query if needed - if _, ok := state.queries[qStr]; !ok { - state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} - } - // increment reference counter - state.queries[qStr].refCount++ + s.subs.index.removeAll(evict) } -func (state *state) remove(clientID string, qStr, id string, reason error) { - clientSubscriptions, ok := state.subscriptions[qStr] - if !ok { - return - } - - subscription, ok := clientSubscriptions[clientID] - if !ok { - return - } - - subscription.cancel(reason) - - // remove client from query map. - // if query has no other clients subscribed, remove it. 
- delete(state.subscriptions[qStr], clientID) - delete(state.subscriptions[id], clientID) - if len(state.subscriptions[qStr]) == 0 { - delete(state.subscriptions, qStr) - } - - // decrease ref counter in queries - if ref, ok := state.queries[qStr]; ok { - ref.refCount-- - if ref.refCount == 0 { - // remove the query if nobody else is using it - delete(state.queries, qStr) +// send delivers the given message to all matching subscribers. An error in +// query matching stops transmission and is returned. +func (s *Server) send(data interface{}, events []types.Event) error { + // At exit, evict any subscriptions that were too slow. + evict := make(subInfoSet) + defer func() { + if len(evict) != 0 { + s.subs.Lock() + defer s.subs.Unlock() + s.removeSubs(evict, ErrTerminated) } - } -} + }() -func (state *state) removeClient(clientID string, reason error) { - seen := map[string]struct{}{} - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[clientID]; ok { - if _, ok = seen[sub.id]; ok { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - state.remove(clientID, qStr, sub.id, reason) - seen[sub.id] = struct{}{} - } - } -} + // N.B. Order is important here. We must acquire and defer the lock release + // AFTER deferring the eviction cleanup: The cleanup must happen after the + // reader lock has released, or it will deadlock. + s.subs.RLock() + defer s.subs.RUnlock() -func (state *state) removeAll(reason error) { - for qStr, clientSubscriptions := range state.subscriptions { - sub, ok := clientSubscriptions[qStr] - if !ok || ok && sub.id == qStr { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - - for clientID := range clientSubscriptions { - state.remove(clientID, qStr, sub.id, reason) - } - } -} - -func (state *state) send(msg interface{}, events []types.Event) error { - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { - continue - } - var q Query - if qi, ok := state.queries[qStr]; ok { - q = qi.q - } else { - continue - } - - match, err := q.Matches(events) + // If an observer is defined, give it control of the message before + // attempting to deliver it to any matching subscribers. If the observer + // fails, the message will not be forwarded. + if s.subs.observe != nil { + err := s.subs.observe(Message{ + data: data, + events: events, + }) if err != nil { - return fmt.Errorf("failed to match against query %s: %w", q.String(), err) + return fmt.Errorf("observer failed on message: %w", err) + } + } + + for si := range s.subs.index.all { + match, err := si.query.Matches(events) + if err != nil { + return fmt.Errorf("match failed against query: %w", err) + // TODO(creachadair): Should we evict this subscription? + } else if !match { + continue } - if match { - for clientID, subscription := range clientSubscriptions { - if cap(subscription.out) == 0 { - // block on unbuffered channel - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - case <-subscription.canceled: - } - } else { - // don't block on buffered channels - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - default: - state.remove(clientID, qStr, subscription.id, ErrOutOfCapacity) - } - } - } + // Publish the events to the subscriber's queue. 
If this fails, e.g., + // because the queue is over capacity or out of quota, evict the + // subscription from the index. + if err := si.sub.publish(Message{ + subID: si.sub.id, + data: data, + events: events, + }); err != nil { + evict.add(si) } } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 525415493..be7f3e6e0 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -2,17 +2,14 @@ package pubsub_test import ( "context" + "errors" "fmt" - "runtime/debug" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" ) @@ -21,167 +18,158 @@ const ( clientID = "test-client" ) -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } +func TestSubscribeWithArgs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) + + t.Run("DefaultLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) + + require.Equal(t, 1, s.NumClients()) + require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + + require.NoError(t, s.Publish(ctx, "Ka-Zar")) + sub.mustReceive(ctx, "Ka-Zar") }) + t.Run("PositiveLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID + "-2", + Query: query.Empty{}, + Limit: 10, + })) + require.NoError(t, s.Publish(ctx, "Aggamon")) + sub.mustReceive(ctx, "Aggamon") + }) +} - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) +func TestObserver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - require.Equal(t, 1, s.NumClients()) - require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + s := newTestServer(ctx, t) - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", subscription.Out()) + done := make(chan struct{}) + var got interface{} + require.NoError(t, s.Observe(ctx, func(msg pubsub.Message) error { + defer close(done) + got = msg.Data() + return nil + })) + const input = "Lions and tigers and bears, oh my!" 
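+	// Publish one message and wait for the observer callback to record it;
+	// the callback closes done after capturing msg.Data into got.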
+ require.NoError(t, s.Publish(ctx, input)) + <-done + require.Equal(t, got, input) +} + +func TestObserverErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) + + require.Error(t, s.Observe(ctx, nil, query.Empty{})) + require.NoError(t, s.Observe(ctx, func(pubsub.Message) error { return nil })) + require.Error(t, s.Observe(ctx, func(pubsub.Message) error { return nil }, query.Empty{})) +} + +func TestPublishDoesNotBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) published := make(chan struct{}) go func() { defer close(published) - err := s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - - err = s.Publish(ctx, "Asylum") - require.NoError(t, err) - - err = s.Publish(ctx, "Ivan") - require.NoError(t, err) + require.NoError(t, s.Publish(ctx, "Quicksilver")) + require.NoError(t, s.Publish(ctx, "Asylum")) + require.NoError(t, s.Publish(ctx, "Ivan")) }() select { case <-published: - assertReceive(t, "Quicksilver", subscription.Out()) - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) + sub.mustReceive(ctx, "Quicksilver") + sub.mustFail(ctx, pubsub.ErrTerminated) case <-time.After(3 * time.Second): - t.Fatal("Expected Publish(Asylum) not to block") + t.Fatal("Publishing should not have blocked") } } -func TestSubscribeWithCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) +func TestSubscribeErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, -1) - require.NoError(t, err) + s := newTestServer(ctx, t) + + t.Run("EmptyQueryErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ClientID: clientID}) + require.Error(t, err) }) - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, 0) - require.NoError(t, err) + t.Run("NegativeLimitErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + Limit: -5, + }) + require.Error(t, err) }) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) - require.NoError(t, err) - err = s.Publish(ctx, "Aggamon") - require.NoError(t, err) - assertReceive(t, "Aggamon", subscription.Out()) } -func TestSubscribeUnbuffered(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) +func TestSlowSubscriber(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{}) - require.NoError(t, err) + s := newTestServer(ctx, t) - published := make(chan struct{}) - go func() { - defer close(published) + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) - err := s.Publish(ctx, "Ultron") - require.NoError(t, err) + require.NoError(t, s.Publish(ctx, "Fat Cobra")) + require.NoError(t, 
s.Publish(ctx, "Viper")) + require.NoError(t, s.Publish(ctx, "Black Panther")) - err = s.Publish(ctx, "Darkhawk") - require.NoError(t, err) - }() - - select { - case <-published: - t.Fatal("Expected Publish(Darkhawk) to block") - case <-time.After(3 * time.Second): - assertReceive(t, "Ultron", subscription.Out()) - assertReceive(t, "Darkhawk", subscription.Out()) - } -} - -func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Publish(ctx, "Fat Cobra") - require.NoError(t, err) - err = s.Publish(ctx, "Viper") - require.NoError(t, err) - - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) + // We had capacity for one item, so we should get that item, but after that + // the subscription should have been terminated by the publisher. + sub.mustReceive(ctx, "Fat Cobra") + sub.mustFail(ctx, pubsub.ErrTerminated) } func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - require.NoError(t, s.Start()) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + s := newTestServer(ctx, t) - ctx := context.Background() + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-1", + Query: query.MustParse("tm.events.type='NewBlock'"), + })) - subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - assertReceive(t, "Iceman", subscription1.Out()) + sub1.mustReceive(ctx, "Iceman") - subscription2, err := s.Subscribe( - ctx, - "client-2", - query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), - ) - require.NoError(t, err) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-2", + Query: query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), + })) events = []abci.Event{ { @@ -195,378 +183,273 @@ func TestDifferentClients(t *testing.T) { } require.NoError(t, s.PublishWithEvents(ctx, "Ultimo", events)) - assertReceive(t, "Ultimo", subscription1.Out()) - assertReceive(t, "Ultimo", subscription2.Out()) + sub1.mustReceive(ctx, "Ultimo") + sub2.mustReceive(ctx, "Ultimo") - subscription3, err := s.Subscribe( - ctx, - "client-3", - query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), - ) - require.NoError(t, err) + sub3 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-3", + Query: query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), + })) - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, - }, - } + events = []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, + 
}} require.NoError(t, s.PublishWithEvents(ctx, "Valeria Richards", events)) - require.Zero(t, len(subscription3.Out())) + sub3.mustTimeOut(ctx, 100*time.Millisecond) } func TestSubscribeDuplicateKeys(t *testing.T) { - ctx := context.Background() - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + s := newTestServer(ctx, t) testCases := []struct { query string expected interface{} }{ - { - "withdraw.rewards='17'", - "Iceman", - }, - { - "withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='1' AND withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='100'", - nil, - }, + {`withdraw.rewards='17'`, "Iceman"}, + {`withdraw.rewards='22'`, "Iceman"}, + {`withdraw.rewards='1' AND withdraw.rewards='22'`, "Iceman"}, + {`withdraw.rewards='100'`, nil}, } for i, tc := range testCases { - sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustParse(tc.query)) - require.NoError(t, err) + id := fmt.Sprintf("client-%d", i) + q := query.MustParse(tc.query) + t.Run(id, func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: id, + Query: q, + })) - events := []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "sender", Value: "bar"}, - {Key: "sender", Value: "baz"}, + events := []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "sender", Value: "bar"}, + {Key: "sender", Value: "baz"}, + }, }, - }, - { - Type: "withdraw", - Attributes: []abci.EventAttribute{ - {Key: "rewards", Value: "1"}, - {Key: "rewards", Value: "17"}, - {Key: "rewards", Value: "22"}, + { + Type: "withdraw", + Attributes: []abci.EventAttribute{ + {Key: "rewards", Value: "1"}, + {Key: "rewards", Value: "17"}, + {Key: "rewards", Value: "22"}, + }, }, - }, - } + } - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) + require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - if tc.expected != nil { - assertReceive(t, tc.expected, sub.Out()) - } else { - require.Zero(t, len(sub.Out())) - } + if tc.expected != nil { + sub.mustReceive(ctx, tc.expected) + } else { + sub.mustTimeOut(ctx, 100*time.Millisecond) + } + }) } } func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) - ctx := context.Background() q := query.MustParse("tm.events.type='NewBlock'") + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} - subscription1, err := s.Subscribe(ctx, clientID, q) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + })) require.NoError(t, s.PublishWithEvents(ctx, "Goblin Queen", events)) - assertReceive(t, "Goblin Queen", subscription1.Out()) + sub1.mustReceive(ctx, "Goblin Queen") - subscription2, err := s.Subscribe(ctx, clientID, q) - require.Error(t, err) - 
require.Nil(t, subscription2) + // Subscribing a second time with the same client ID and query fails. + { + sub2, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + }) + require.Error(t, err) + require.Nil(t, sub2) + } + // The attempt to re-subscribe does not disrupt the existing sub. require.NoError(t, s.PublishWithEvents(ctx, "Spider-Man", events)) - assertReceive(t, "Spider-Man", subscription1.Out()) + sub1.mustReceive(ctx, "Spider-Man") } func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + s := newTestServer(ctx, t) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + + // Removing the subscription we just made should succeed. + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) + Query: query.MustParse("tm.events.type='NewBlock'"), + })) - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe") + // Publishing should still work. + require.NoError(t, s.Publish(ctx, "Nick Fury")) - assertCanceled(t, subscription, pubsub.ErrUnsubscribed) + // The unsubscribed subscriber should report as such. 
+ sub.mustFail(ctx, pubsub.ErrUnsubscribed) } func TestClientUnsubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) + s := newTestServer(ctx, t) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) - err = s.UnsubscribeAll(ctx, clientID) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + require.ErrorIs(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + }), pubsub.ErrSubscriptionNotFound) + require.ErrorIs(t, s.UnsubscribeAll(ctx, clientID), pubsub.ErrSubscriptionNotFound) } func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{Subscriber: clientID, Query: query.Empty{}}) - require.NoError(t, err) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) + s := newTestServer(ctx, t) - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", subscription.Out()) + args := pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + } + newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.Empty{}, + })) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Publish(ctx, "Cable")) + sub.mustReceive(ctx, "Cable") } func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - subscription2, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'")) - require.NoError(t, err) + s := newTestServer(ctx, t) - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, 
pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlockHeader'"), + })) - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll") - require.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll") + require.NoError(t, s.UnsubscribeAll(ctx, clientID)) + require.NoError(t, s.Publish(ctx, "Nick Fury")) + + sub1.mustFail(ctx, pubsub.ErrUnsubscribed) + sub2.mustFail(ctx, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription1, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription2, pubsub.ErrUnsubscribed) } func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - s.SetLogger(log.TestingLogger()) + s := pubsub.NewServer(pubsub.BufferCapacity(2), + func(s *pubsub.Server) { + s.Logger = log.TestingLogger() + }) require.Equal(t, 2, s.BufferCapacity()) - ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - require.Equal(t, context.DeadlineExceeded, err) - } + require.NoError(t, s.Publish(ctx, "Nighthawk")) + require.NoError(t, s.Publish(ctx, "Sage")) + + ctx, cancel = context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + require.ErrorIs(t, s.Publish(ctx, "Ironclad"), context.DeadlineExceeded) } -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } +func newTestServer(ctx context.Context, t testing.TB) *pubsub.Server { + t.Helper() -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } + s := pubsub.NewServer(func(s *pubsub.Server) { + s.Logger = log.TestingLogger() }) - ctx := context.Background() - for i := 0; i < n; i++ { - subscription, err := s.Subscribe( - ctx, - clientID, - query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), - ) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) + return s +} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: string(rune(i))}}, - }, - } +type testSub struct { + t testing.TB + *pubsub.Subscription +} - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) +func newTestSub(t testing.TB) *testSub { return &testSub{t: t} } + +func (s *testSub) must(sub *pubsub.Subscription, err error) *testSub { + s.t.Helper() + require.NoError(s.t, err) + require.NotNil(s.t, 
sub) + s.Subscription = sub + return s +} + +func (s *testSub) mustReceive(ctx context.Context, want interface{}) { + s.t.Helper() + got, err := s.Next(ctx) + require.NoError(s.t, err) + require.Equal(s.t, want, got.Data()) +} + +func (s *testSub) mustTimeOut(ctx context.Context, dur time.Duration) { + s.t.Helper() + tctx, cancel := context.WithTimeout(ctx, dur) + defer cancel() + got, err := s.Next(tctx) + if !errors.Is(err, context.DeadlineExceeded) { + s.t.Errorf("Next: got (%+v, %v), want %v", got, err, context.DeadlineExceeded) } } -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - subscription, err := s.Subscribe(ctx, clientID, q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: "1"}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) +func (s *testSub) mustFail(ctx context.Context, want error) { + s.t.Helper() + got, err := s.Next(ctx) + if err == nil && want != nil { + s.t.Fatalf("Next: got (%+v, %v), want error %v", got, err, want) } -} - -// HELPERS - -func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - require.Equal(t, expected, actual.Data(), msgAndArgs...) - case <-time.After(1 * time.Second): - t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() - } -} - -func assertCanceled(t *testing.T, subscription *pubsub.Subscription, err error) { - _, ok := <-subscription.Canceled() - require.False(t, ok) - require.Equal(t, err, subscription.Err()) + require.ErrorIs(s.t, err, want) } diff --git a/libs/pubsub/subindex.go b/libs/pubsub/subindex.go new file mode 100644 index 000000000..48dccf72d --- /dev/null +++ b/libs/pubsub/subindex.go @@ -0,0 +1,113 @@ +package pubsub + +import "github.com/tendermint/tendermint/abci/types" + +// An item to be published to subscribers. +type item struct { + Data interface{} + Events []types.Event +} + +// A subInfo value records a single subscription. +type subInfo struct { + clientID string // chosen by the client + query Query // chosen by the client + subID string // assigned at registration + sub *Subscription // receives published events +} + +// A subInfoSet is an unordered set of subscription info records. +type subInfoSet map[*subInfo]struct{} + +func (s subInfoSet) contains(si *subInfo) bool { _, ok := s[si]; return ok } +func (s subInfoSet) add(si *subInfo) { s[si] = struct{}{} } +func (s subInfoSet) remove(si *subInfo) { delete(s, si) } + +// withQuery returns the subset of s whose query string matches qs. +func (s subInfoSet) withQuery(qs string) subInfoSet { + out := make(subInfoSet) + for si := range s { + if si.query.String() == qs { + out.add(si) + } + } + return out +} + +// A subIndex is an indexed collection of subscription info records. 
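+// It maintains three views of one underlying set: all records, records
+// indexed by client ID, and records indexed by query string, so that
+// lookups by any of those keys need not scan every subscription.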
+// The index is not safe for concurrent use without external synchronization. +type subIndex struct { + all subInfoSet // all subscriptions + byClient map[string]subInfoSet // per-client subscriptions + byQuery map[string]subInfoSet // per-query subscriptions + + // TODO(creachadair): We allow indexing by query to support existing use by + // the RPC service methods for event streaming. Fix up those methods not to + // require this, and then remove indexing by query. +} + +// newSubIndex constructs a new, empty subscription index. +func newSubIndex() *subIndex { + return &subIndex{ + all: make(subInfoSet), + byClient: make(map[string]subInfoSet), + byQuery: make(map[string]subInfoSet), + } +} + +// findClients returns the set of subscriptions for the given client ID, or nil. +func (idx *subIndex) findClientID(id string) subInfoSet { return idx.byClient[id] } + +// findQuery returns the set of subscriptions on the given query string, or nil. +func (idx *subIndex) findQuery(qs string) subInfoSet { return idx.byQuery[qs] } + +// contains reports whether idx contains any subscription matching the given +// client ID and query pair. +func (idx *subIndex) contains(clientID, query string) bool { + csubs, qsubs := idx.byClient[clientID], idx.byQuery[query] + if len(csubs) == 0 || len(qsubs) == 0 { + return false + } + for si := range csubs { + if qsubs.contains(si) { + return true + } + } + return false +} + +// add adds si to the index, replacing any previous entry with the same terms. +// It is the caller's responsibility to check for duplicates before adding. +// See also the contains method. +func (idx *subIndex) add(si *subInfo) { + idx.all.add(si) + if m := idx.byClient[si.clientID]; m == nil { + idx.byClient[si.clientID] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } + qs := si.query.String() + if m := idx.byQuery[qs]; m == nil { + idx.byQuery[qs] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } +} + +// removeAll removes all the elements of s from the index. +func (idx *subIndex) removeAll(s subInfoSet) { + for si := range s { + idx.all.remove(si) + idx.byClient[si.clientID].remove(si) + if len(idx.byClient[si.clientID]) == 0 { + delete(idx.byClient, si.clientID) + } + if si.query != nil { + qs := si.query.String() + idx.byQuery[qs].remove(si) + if len(idx.byQuery[qs]) == 0 { + delete(idx.byQuery, qs) + } + } + } +} diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 40b84711e..6e8c6fd07 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -1,89 +1,73 @@ package pubsub import ( + "context" "errors" - "fmt" "github.com/google/uuid" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/libs/queue" ) var ( - // ErrUnsubscribed is returned by Err when a client unsubscribes. - ErrUnsubscribed = errors.New("client unsubscribed") + // ErrUnsubscribed is returned by Next when the client has unsubscribed. + ErrUnsubscribed = errors.New("subscription removed by client") - // ErrOutOfCapacity is returned by Err when a client is not pulling messages - // fast enough. Note the client's subscription will be terminated. - ErrOutOfCapacity = errors.New("client is not pulling messages fast enough") + // ErrTerminated is returned by Next when the subscription was terminated by + // the publisher. 
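	//
	// After Next fails, the error reports which side ended the subscription,
	// e.g. (a sketch):
	//
	//	if _, err := sub.Next(ctx); errors.Is(err, ErrTerminated) {
	//		// the publisher stopped, or evicted this slow subscriber
	//	}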
+ ErrTerminated = errors.New("subscription terminated by publisher") ) -// A Subscription represents a client subscription for a particular query and -// consists of three things: -// 1) channel onto which messages and events are published -// 2) channel which is closed if a client is too slow or choose to unsubscribe -// 3) err indicating the reason for (2) +// A Subscription represents a client subscription for a particular query. type Subscription struct { - id string - out chan Message - - canceled chan struct{} - mtx tmsync.RWMutex - err error + id string + queue *queue.Queue // open until the subscription ends + stopErr error // after queue is closed, the reason why } -// NewSubscription returns a new subscription with the given outCapacity. -func NewSubscription(outCapacity int) *Subscription { - return &Subscription{ - id: uuid.NewString(), - out: make(chan Message, outCapacity), - canceled: make(chan struct{}), +// newSubscription returns a new subscription with the given queue capacity. +func newSubscription(quota, limit int) (*Subscription, error) { + queue, err := queue.New(queue.Options{ + SoftQuota: quota, + HardLimit: limit, + }) + if err != nil { + return nil, err } + return &Subscription{ + id: uuid.NewString(), + queue: queue, + }, nil } -// Out returns a channel onto which messages and events are published. -// Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from -// receiving a nil message. -func (s *Subscription) Out() <-chan Message { - return s.out +// Next blocks until a message is available, ctx ends, or the subscription +// ends. Next returns ErrUnsubscribed if s was unsubscribed, ErrTerminated if +// s was terminated by the publisher, or a context error if ctx ended without a +// message being available. +func (s *Subscription) Next(ctx context.Context) (Message, error) { + next, err := s.queue.Wait(ctx) + if errors.Is(err, queue.ErrQueueClosed) { + return Message{}, s.stopErr + } else if err != nil { + return Message{}, err + } + return next.(Message), nil } +// ID returns the unique subscription identifier for s. func (s *Subscription) ID() string { return s.id } -// Canceled returns a channel that's closed when the subscription is -// terminated and supposed to be used in a select statement. -func (s *Subscription) Canceled() <-chan struct{} { - return s.canceled -} +// publish transmits msg to the subscriber. It reports a queue error if the +// queue cannot accept any further messages. +func (s *Subscription) publish(msg Message) error { return s.queue.Add(msg) } -// Err returns nil if the channel returned by Canceled is not yet closed. -// If the channel is closed, Err returns a non-nil error explaining why: -// - ErrUnsubscribed if the subscriber choose to unsubscribe, -// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, -// After Err returns a non-nil error, successive calls to Err return the same -// error. -func (s *Subscription) Err() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.err -} - -func (s *Subscription) cancel(err error) { - s.mtx.Lock() - defer s.mtx.Unlock() - defer func() { - perr := recover() - if err == nil && perr != nil { - err = fmt.Errorf("problem closing subscription: %v", perr) - } - }() - - if s.err == nil && err != nil { - s.err = err +// stop terminates the subscription with the given error reason. 
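+// The reason is surfaced to the client by Next once the queue has closed;
+// stop panics if err is nil.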
+func (s *Subscription) stop(err error) { + if err == nil { + panic("nil stop error") } - - close(s.canceled) + s.stopErr = err + s.queue.Close() } // Message glues data and events together. @@ -93,14 +77,6 @@ type Message struct { events []types.Event } -func NewMessage(subID string, data interface{}, events []types.Event) Message { - return Message{ - subID: subID, - data: data, - events: events, - } -} - // SubscriptionID returns the unique identifier for the subscription // that produced this message. func (msg Message) SubscriptionID() string { return msg.subID } diff --git a/libs/service/service.go b/libs/service/service.go index 0af243995..f2b440e94 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -1,8 +1,8 @@ package service import ( + "context" "errors" - "fmt" "sync/atomic" "github.com/tendermint/tendermint/libs/log" @@ -22,39 +22,33 @@ var ( // Service defines a service that can be started, stopped, and reset. type Service interface { - // Start the service. - // If it's already started or stopped, will return an error. - // If OnStart() returns an error, it's returned by Start() - Start() error - OnStart() error - - // Stop the service. - // If it's already stopped, will return an error. - // OnStop must never error. - Stop() error - OnStop() - - // Reset the service. - // Panics by default - must be overwritten to enable reset. - Reset() error - OnReset() error + // Start is called to start the service, which should run until + // the context terminates. If the service is already running, Start + // must report an error. + Start(context.Context) error // Return true if the service is running IsRunning() bool - // Quit returns a channel, which is closed once service is stopped. - Quit() <-chan struct{} - // String representation of the service String() string - // SetLogger sets a logger. - SetLogger(log.Logger) - // Wait blocks until the service is stopped. Wait() } +// Implementation describes the implementation that the +// BaseService implementation wraps. +type Implementation interface { + Service + + // Called by the Services Start Method + OnStart(context.Context) error + + // Called when the service's context is canceled. + OnStop() +} + /* Classical-inheritance-style service declarations. Services can be started, then stopped, then optionally restarted. @@ -85,7 +79,7 @@ Typical usage: return fs } - func (fs *FooService) OnStart() error { + func (fs *FooService) OnStart(ctx context.Context) error { fs.BaseService.OnStart() // Always call the overridden method. // initialize private fields // start subroutines, etc. @@ -105,11 +99,11 @@ type BaseService struct { quit chan struct{} // The "subclass" of BaseService - impl Service + impl Implementation } // NewBaseService creates a new BaseService. -func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { +func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { if logger == nil { logger = log.NewNopLogger() } @@ -122,15 +116,10 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { } } -// SetLogger implements Service by setting a logger. -func (bs *BaseService) SetLogger(l log.Logger) { - bs.Logger = l -} - -// Start implements Service by calling OnStart (if defined). An error will be -// returned if the service is already running or stopped. Not to start the -// stopped service, you need to call Reset. -func (bs *BaseService) Start() error { +// Start starts the Service and calls its OnStart method. 
An error will be +// returned if the service is already running or stopped. To restart a +// stopped service, call Reset. +func (bs *BaseService) Start(ctx context.Context) error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { bs.Logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) @@ -140,11 +129,26 @@ func (bs *BaseService) Start() error { bs.Logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) - if err := bs.impl.OnStart(); err != nil { + if err := bs.impl.OnStart(ctx); err != nil { // revert flag atomic.StoreUint32(&bs.started, 0) return err } + + go func(ctx context.Context) { + <-ctx.Done() + if err := bs.Stop(); err != nil { + bs.Logger.Error("stopped service", + "err", err.Error(), + "service", bs.name, + "impl", bs.impl.String()) + } + + bs.Logger.Info("stopped service", + "service", bs.name, + "impl", bs.impl.String()) + }(ctx) + return nil } @@ -155,7 +159,7 @@ func (bs *BaseService) Start() error { // OnStart implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStart() -func (bs *BaseService) OnStart() error { return nil } +func (bs *BaseService) OnStart(ctx context.Context) error { return nil } // Stop implements Service by calling OnStop (if defined) and closing quit // channel. An error will be returned if the service is already stopped. @@ -183,26 +187,6 @@ func (bs *BaseService) Stop() error { // that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} -// Reset implements Service by calling OnReset callback (if defined). An error -// will be returned if the service is running. -func (bs *BaseService) Reset() error { - if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug("cannot reset service; not stopped", "service", bs.name, "impl", bs.impl.String()) - return fmt.Errorf("can't reset running %s", bs.name) - } - - // whether or not we've started, we can reset - atomic.CompareAndSwapUint32(&bs.started, 1, 0) - - bs.quit = make(chan struct{}) - return bs.impl.OnReset() -} - -// OnReset implements Service by panicking. -func (bs *BaseService) OnReset() error { - panic("The service cannot be reset") -} - // IsRunning implements Service by returning true or false depending on the // service's state. func (bs *BaseService) IsRunning() bool { @@ -210,16 +194,10 @@ func (bs *BaseService) IsRunning() bool { } // Wait blocks until the service is stopped. -func (bs *BaseService) Wait() { - <-bs.quit -} +func (bs *BaseService) Wait() { <-bs.quit } // String implements Service by returning a string representation of the service. -func (bs *BaseService) String() string { - return bs.name -} +func (bs *BaseService) String() string { return bs.name } // Quit Implements Service by returning a quit channel. 
diff --git a/libs/service/service_test.go b/libs/service/service_test.go
index 7abc6f4fb..dc5d0ccb1 100644
--- a/libs/service/service_test.go
+++ b/libs/service/service_test.go
@@ -1,6 +1,7 @@
 package service
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -16,9 +17,12 @@ func (testService) OnReset() error {
 }
 
 func TestBaseServiceWait(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	ts := &testService{}
 	ts.BaseService = *NewBaseService(nil, "TestService", ts)
-	err := ts.Start()
+	err := ts.Start(ctx)
 	require.NoError(t, err)
 
 	waitFinished := make(chan struct{})
@@ -36,22 +40,3 @@ func TestBaseServiceWait(t *testing.T) {
 		t.Fatal("expected Wait() to finish within 100 ms.")
 	}
 }
-
-func TestBaseServiceReset(t *testing.T) {
-	ts := &testService{}
-	ts.BaseService = *NewBaseService(nil, "TestService", ts)
-	err := ts.Start()
-	require.NoError(t, err)
-
-	err = ts.Reset()
-	require.Error(t, err, "expected cant reset service error")
-
-	err = ts.Stop()
-	require.NoError(t, err)
-
-	err = ts.Reset()
-	require.NoError(t, err)
-
-	err = ts.Start()
-	require.NoError(t, err)
-}
diff --git a/libs/strings/string.go b/libs/strings/string.go
index b09c00063..6cc0b18ee 100644
--- a/libs/strings/string.go
+++ b/libs/strings/string.go
@@ -55,6 +55,10 @@ func SplitAndTrim(s, sep, cutset string) []string {
 	return spl
 }
 
+// TrimSpace removes all leading and trailing whitespace from the
+// string.
+func TrimSpace(s string) string { return strings.TrimSpace(s) }
+
 // Returns true if s is a non-empty printable non-tab ascii character.
 func IsASCIIText(s string) bool {
 	if len(s) == 0 {
diff --git a/light/client_test.go b/light/client_test.go
index a2c9b916d..2ca00edb7 100644
--- a/light/client_test.go
+++ b/light/client_test.go
@@ -11,7 +11,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-
 	dbm "github.com/tendermint/tm-db"
 
 	"github.com/tendermint/tendermint/internal/test/factory"
diff --git a/light/doc.go b/light/doc.go
index 700bbeb6c..c30c68eb0 100644
--- a/light/doc.go
+++ b/light/doc.go
@@ -94,7 +94,7 @@ Check out other examples in example_test.go
 ## 2. Pure functions to verify a new header (see verifier.go)
 
 Verify function verifies a new header against some trusted header. See
-https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md
+https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md
 for details.
 
 There are two methods of verification: sequential and bisection
@@ -118,10 +118,7 @@ as a wrapper, which verifies all the headers, using a light client connected
 to some other node.
 
 See
-https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html
-for usage example.
-Or see
-https://github.com/tendermint/spec/tree/master/spec/consensus/light-client
-for the full spec
+https://github.com/tendermint/spec/tree/master/spec/light-client
+for the light client specification.
*/ package light diff --git a/light/example_test.go b/light/example_test.go index 2e0feb5e1..1291670e2 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" stdlog "log" "os" "time" @@ -22,7 +21,11 @@ import ( func ExampleClient() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + conf, err := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + if err != nil { + stdlog.Fatal(err) + } + logger := log.TestingLogger() // Start a test application @@ -34,7 +37,7 @@ func ExampleClient() { } defer func() { _ = closer(ctx) }() - dbDir, err := ioutil.TempDir("", "light-client-example") + dbDir, err := os.MkdirTemp("", "light-client-example") if err != nil { stdlog.Fatal(err) } diff --git a/light/light_test.go b/light/light_test.go index f5d2ddd89..d88891fe9 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" "os" "testing" "time" @@ -29,7 +28,8 @@ func TestClientIntegration_Update(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // Start a test application app := kvstore.NewApplication() @@ -40,7 +40,7 @@ func TestClientIntegration_Update(t *testing.T) { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-test-update-example") + dbDir, err := os.MkdirTemp("", "light-client-test-update-example") require.NoError(t, err) defer os.RemoveAll(dbDir) @@ -89,7 +89,8 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // Start a test application app := kvstore.NewApplication() @@ -98,7 +99,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - dbDir, err := ioutil.TempDir("", "light-client-test-verify-example") + dbDir, err := os.MkdirTemp("", "light-client-test-verify-example") require.NoError(t, err) defer os.RemoveAll(dbDir) diff --git a/light/mbt/driver_test.go b/light/mbt/driver_test.go index bf6ab3d43..f61c3b234 100644 --- a/light/mbt/driver_test.go +++ b/light/mbt/driver_test.go @@ -1,7 +1,7 @@ package mbt import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -22,7 +22,7 @@ func TestVerify(t *testing.T) { filename := filename t.Run(filename, func(t *testing.T) { - jsonBlob, err := ioutil.ReadFile(filename) + jsonBlob, err := os.ReadFile(filename) if err != nil { t.Fatal(err) } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index ceea0f6d2..f8bf7d29e 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/light/provider" rpcclient "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -300,11 +300,11 @@ func (p *http) noBlock(e error) error { 
func (p *http) parseRPCError(e *rpctypes.RPCError) error { switch { // 1) check if the error indicates that the peer doesn't have the block - case strings.Contains(e.Data, ctypes.ErrHeightNotAvailable.Error()): + case strings.Contains(e.Data, coretypes.ErrHeightNotAvailable.Error()): return p.noBlock(provider.ErrLightBlockNotFound) // 2) check if the height requested is too high - case strings.Contains(e.Data, ctypes.ErrHeightExceedsChainHead.Error()): + case strings.Contains(e.Data, coretypes.ErrHeightExceedsChainHead.Error()): return p.noBlock(provider.ErrHeightTooHigh) // 3) check if the provider closed the connection diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index adcb69fb9..1f695d442 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -35,7 +35,8 @@ func TestNewProvider(t *testing.T) { func TestProvider(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := rpctest.CreateConfig(t.Name()) + cfg, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against app := kvstore.NewApplication() diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 6f2622588..f8c183308 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -49,8 +49,8 @@ func NewProxy( // routes to proxy via Client, and starts up an HTTP server on the TCP network // address p.Addr. // See http#Server#ListenAndServe. -func (p *Proxy) ListenAndServe() error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServe(ctx context.Context) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } @@ -67,8 +67,8 @@ func (p *Proxy) ListenAndServe() error { // ListenAndServeTLS acts identically to ListenAndServe, except that it expects // HTTPS connections. // See http#Server#ListenAndServeTLS. -func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } @@ -84,7 +84,7 @@ func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { ) } -func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { +func (p *Proxy) listen(ctx context.Context) (net.Listener, *http.ServeMux, error) { mux := http.NewServeMux() // 1) Register regular routes. @@ -107,7 +107,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { // 3) Start a client. 
if !p.Client.IsRunning() { - if err := p.Client.Start(); err != nil { + if err := p.Client.Start(ctx); err != nil { return nil, mux, fmt.Errorf("can't start client: %w", err) } } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62d70f545..436ae1b76 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -4,7 +4,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" lrpc "github.com/tendermint/tendermint/light/rpc" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" @@ -52,91 +52,91 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { } } -type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) +type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { return c.Health(ctx.Context()) } } -type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) +type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) // nolint: interfacer func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { return c.Status(ctx.Context()) } } -type rpcNetInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) +type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { return c.NetInfo(ctx.Context()) } } -type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) +type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) //nolint:lll func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight) } } -type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) +type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { return c.Genesis(ctx.Context()) } } -type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) +type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc { - return func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { + return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { return 
c.GenesisChunked(ctx.Context(), chunk) } } -type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) +type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) { return c.Block(ctx.Context(), height) } } -type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) +type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { - return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { + return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) { return c.BlockByHash(ctx.Context(), hash) } } -type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) +type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) { return c.BlockResults(ctx.Context(), height) } } -type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) +type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) { return c.Commit(ctx.Context(), height) } } -type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) +type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) func makeTxFunc(c *lrpc.Client) rpcTxFunc { - return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { return c.Tx(ctx.Context(), hash, prove) } } @@ -147,7 +147,7 @@ type rpcTxSearchFunc func( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) +) (*coretypes.ResultTxSearch, error) func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { return func( @@ -156,7 +156,7 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultTxSearch, error) { + ) (*coretypes.ResultTxSearch, error) { return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy) } } @@ -167,7 +167,7 @@ type rpcBlockSearchFunc func( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) +) (*coretypes.ResultBlockSearch, error) func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { return func( @@ -176,90 +176,90 @@ func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultBlockSearch, error) { + ) (*coretypes.ResultBlockSearch, error) { return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy) } } type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage *int) (*ctypes.ResultValidators, 
error) + page, perPage *int) (*coretypes.ResultValidators, error) func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { return c.Validators(ctx.Context(), height, page, perPage) } } -type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) +type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { return c.DumpConsensusState(ctx.Context()) } } -type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) +type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { return c.ConsensusState(ctx.Context()) } } -type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) +type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) { return c.ConsensusParams(ctx.Context(), height) } } -type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) +type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { + return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.UnconfirmedTxs(ctx.Context(), limit) } } -type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) +type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.NumUnconfirmedTxs(ctx.Context()) } } -type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) +type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.BroadcastTxCommit(ctx.Context(), tx) } } -type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type 
rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.BroadcastTxSync(ctx.Context(), tx) } } -type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.BroadcastTxAsync(ctx.Context(), tx) } } type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, - data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) + data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, - height int64, prove bool) (*ctypes.ResultABCIQuery, error) { + height int64, prove bool) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ Height: height, @@ -268,19 +268,19 @@ func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { } } -type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) +type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { return c.ABCIInfo(ctx.Context()) } } -type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) +type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) // nolint: interfacer func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { - return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.BroadcastEvidence(ctx.Context(), ev) } } diff --git a/light/rpc/client.go b/light/rpc/client.go index 84761fb04..6143338f4 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -16,7 +16,7 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -98,9 +98,9 @@ func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { return c } -func (c *Client) OnStart() error { +func (c *Client) OnStart(ctx context.Context) error { if !c.next.IsRunning() { - return c.next.Start() + return c.next.Start(ctx) } return nil } @@ -113,22 +113,22 @@ func (c *Client) OnStop() { } } -func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c 
*Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.next.Status(ctx) } -func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.next.ABCIInfo(ctx) } // ABCIQuery requests proof by default. -func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { //nolint:lll return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } // ABCIQueryWithOptions returns an error if opts.Prove is false. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { // always request the proof opts.Prove = true @@ -150,7 +150,7 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return nil, errors.New("no proof ops") } if resp.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -185,46 +185,50 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb } } - return &ctypes.ResultABCIQuery{Response: resp}, nil + return &coretypes.ResultABCIQuery{Response: resp}, nil } -func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.next.BroadcastTxCommit(ctx, tx) } -func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.next.BroadcastTxAsync(ctx, tx) } -func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.next.BroadcastTxSync(ctx, tx) } -func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, limit) } -func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.NumUnconfirmedTxs(ctx) } -func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.next.CheckTx(ctx, tx) } -func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Client) RemoveTx(ctx context.Context, txKey types.TxKey) error { + return c.next.RemoveTx(ctx, txKey) +} + +func (c *Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.next.NetInfo(ctx) } -func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, 
error) { return c.next.DumpConsensusState(ctx) } -func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { return c.next.ConsensusState(ctx) } -func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { res, err := c.next.ConsensusParams(ctx, height) if err != nil { return nil, err @@ -235,7 +239,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re return nil, err } if res.BlockHeight <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -253,13 +257,13 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re return res, nil } -func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.next.Health(ctx) } // BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header // returned. -func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight) if err != nil { return nil, err @@ -298,16 +302,16 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) return res, nil } -func (c *Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.next.Genesis(ctx) } -func (c *Client) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Client) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { return c.next.GenesisChunked(ctx, id) } // Block calls rpcclient#Block and then verifies the result. -func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { res, err := c.next.Block(ctx, height) if err != nil { return nil, err @@ -341,7 +345,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, } // BlockByHash calls rpcclient#BlockByHash and then verifies the result. -func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctypes.ResultBlock, error) { +func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*coretypes.ResultBlock, error) { res, err := c.next.BlockByHash(ctx, hash) if err != nil { return nil, err @@ -376,7 +380,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctype // BlockResults returns the block results for the given height. If no height is // provided, the results of the block preceding the latest are returned. 
-func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { var h int64 if height == nil { res, err := c.next.Status(ctx) @@ -397,7 +401,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul // Validate res. if res.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -438,7 +442,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul return res, nil } -func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { // Update the light client if we're behind and retrieve the light block at the requested height // or at the latest height if no height is provided. l, err := c.updateLightClientIfNeededTo(ctx, height) @@ -446,7 +450,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return nil, err } - return &ctypes.ResultCommit{ + return &coretypes.ResultCommit{ SignedHeader: *l.SignedHeader, CanonicalCommit: true, }, nil @@ -454,7 +458,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi // Tx calls rpcclient#Tx method and then verifies the proof if such was // requested. -func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ctypes.ResultTx, error) { +func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { res, err := c.next.Tx(ctx, hash, prove) if err != nil || !prove { return res, err @@ -462,7 +466,7 @@ func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ct // Validate res. if res.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -481,7 +485,7 @@ func (c *Client) TxSearch( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy) } @@ -490,7 +494,7 @@ func (c *Client) BlockSearch( query string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { +) (*coretypes.ResultBlockSearch, error) { return c.next.BlockSearch(ctx, query, page, perPage, orderBy) } @@ -499,7 +503,7 @@ func (c *Client) Validators( ctx context.Context, height *int64, pagePtr, perPagePtr *int, -) (*ctypes.ResultValidators, error) { +) (*coretypes.ResultValidators, error) { // Update the light client if we're behind and retrieve the light block at the // requested height or at the latest height if no height is provided. 
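Every read method in this wrapper follows the same shape: delegate to the underlying client, then re-verify the response against the light client before returning it, with all result types now coming from rpc/coretypes. A minimal sketch of what a caller sees, assuming an already-constructed next client and lc light client (their construction is elided; only NewClient, Start, and Commit are taken from the code above):

package lightrpcexample

import (
	"context"

	lrpc "github.com/tendermint/tendermint/light/rpc"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/rpc/coretypes"
)

// verifiedCommit returns a commit that has already been checked against
// the light client. A nil height means the latest height; note the result
// is a *coretypes.ResultCommit, not a *ctypes.ResultCommit.
func verifiedCommit(ctx context.Context, next rpcclient.Client, lc lrpc.LightClient) (*coretypes.ResultCommit, error) {
	c := lrpc.NewClient(next, lc)
	if err := c.Start(ctx); err != nil { // OnStart also starts next if it is not running
		return nil, err
	}

	res, err := c.Commit(ctx, nil)
	if err != nil {
		return nil, err
	}
	// CanonicalCommit is always true here: the result is rebuilt from the
	// verified light block rather than passed through from the remote node.
	_ = res.CanonicalCommit
	return res, nil
}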
@@ -518,19 +522,19 @@ func (c *Client) Validators( skipCount := validateSkipCount(page, perPage) v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &coretypes.ResultValidators{ BlockHeight: l.Height, Validators: v, Count: len(v), Total: totalCount}, nil } -func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.next.BroadcastEvidence(ctx, ev) } func (c *Client) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { return c.next.Subscribe(ctx, subscriber, query, outCapacity...) } @@ -565,7 +569,7 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { // SubscribeWS subscribes for events using the given query and remote address as // a subscriber, but does not verify responses (UNSAFE)! // TODO: verify data -func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { +func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) if err != nil { return nil, err @@ -588,27 +592,27 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.Resul } }() - return &ctypes.ResultSubscribe{}, nil + return &coretypes.ResultSubscribe{}, nil } // UnsubscribeWS calls original client's Unsubscribe using remote address as a // subscriber. -func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { +func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // UnsubscribeAllWS calls original client's UnsubscribeAll using remote address // as a subscriber. 
-func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { +func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // XXX: Copied from rpc/core/env.go @@ -620,7 +624,7 @@ const ( func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { if perPage < 1 { - panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage)) + panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) } if pagePtr == nil { // no page parameter @@ -633,7 +637,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { } page := *pagePtr if page <= 0 || page > pages { - return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page) + return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page) } return page, nil diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index 6229a9a93..5e0739a52 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" diff --git a/node/node.go b/node/node.go index efa69a724..7d3b56b47 100644 --- a/node/node.go +++ b/node/node.go @@ -6,22 +6,26 @@ import ( "fmt" "net" "net/http" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port "strconv" "time" - _ "github.com/lib/pq" // provide the psql db driver "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" + "github.com/tendermint/tendermint/internal/proxy" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -30,13 +34,12 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" - "github.com/tendermint/tendermint/proxy" - rpccore "github.com/tendermint/tendermint/rpc/core" - grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" + + _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port 
+ + _ "github.com/lib/pq" // provide the psql db driver ) // nodeImpl is the highest level interface to a full Tendermint node. @@ -45,22 +48,20 @@ type nodeImpl struct { service.BaseService // config - config *cfg.Config + config *config.Config genesisDoc *types.GenesisDoc // initial validator set privValidator types.PrivValidator // local node's validator key // network - transport *p2p.MConnTransport - sw *p2p.Switch // p2p connections peerManager *p2p.PeerManager router *p2p.Router - addrBook pex.AddrBook // known peers nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey isListening bool // services - eventBus *types.EventBus // pub/sub for services + eventBus *eventbus.EventBus // pub/sub for services + eventSinks []indexer.EventSink stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor service.Service // for block-syncing @@ -68,10 +69,11 @@ type nodeImpl struct { mempool mempool.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusReactor *cs.Reactor // for participating in the consensus + consensusReactor *consensus.Reactor // for participating in the consensus pexReactor service.Service // for exchanging peer addresses evidenceReactor service.Service rpcListeners []net.Listener // rpc servers + shutdownOps closer indexerService service.Service rpcEnv *rpccore.Environment prometheusSrv *http.Server @@ -80,23 +82,27 @@ type nodeImpl struct { // newDefaultNode returns a Tendermint node with default settings for the // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. -func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, error) { - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) +func newDefaultNode( + ctx context.Context, + cfg *config.Config, + logger log.Logger, +) (service.Service, error) { + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) if err != nil { - return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) + return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err) } - if config.Mode == cfg.ModeSeed { - return makeSeedNode(config, - cfg.DefaultDBProvider, + if cfg.Mode == config.ModeSeed { + return makeSeedNode(cfg, + config.DefaultDBProvider, nodeKey, - defaultGenesisDocProviderFunc(config), + defaultGenesisDocProviderFunc(cfg), logger, ) } var pval *privval.FilePV - if config.Mode == cfg.ModeValidator { - pval, err = privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + if cfg.Mode == config.ModeValidator { + pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) if err != nil { return nil, err } @@ -104,98 +110,129 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err pval = nil } - appClient, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) - return makeNode(config, + appClient, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + + return makeNode( + ctx, + cfg, pval, nodeKey, appClient, - defaultGenesisDocProviderFunc(config), - cfg.DefaultDBProvider, + defaultGenesisDocProviderFunc(cfg), + config.DefaultDBProvider, logger, ) } // makeNode returns a new, ready to go, Tendermint Node. 
-func makeNode(config *cfg.Config, +func makeNode( + ctx context.Context, + cfg *config.Config, privValidator types.PrivValidator, nodeKey types.NodeKey, - clientCreator proxy.ClientCreator, + clientCreator abciclient.Creator, genesisDocProvider genesisDocProvider, - dbProvider cfg.DBProvider, - logger log.Logger) (service.Service, error) { + dbProvider config.DBProvider, + logger log.Logger, +) (service.Service, error) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) - blockStore, stateDB, err := initDBs(config, dbProvider) + closers := []closer{convertCancelCloser(cancel)} + + blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider) if err != nil { - return nil, err + return nil, combineCloseError(err, dbCloser) } + closers = append(closers, dbCloser) + stateStore := sm.NewStore(stateDB) genDoc, err := genesisDocProvider() if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } err = genDoc.ValidateAndComplete() if err != nil { - return nil, fmt.Errorf("error in genesis doc: %w", err) + return nil, combineCloseError( + fmt.Errorf("error in genesis doc: %w", err), + makeCloser(closers)) } state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } + nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) + proxyApp, err := createAndStartProxyAppConns(ctx, clientCreator, logger, nodeMetrics.proxy) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } // EventBus and IndexerService must be started before the handshake because // we might need to index the txs of the replayed block as this might not have happened // when the node stopped last time (i.e. the node stopped after it saved the block // but before it indexed the txs, or, endblocker panicked) - eventBus, err := createAndStartEventBus(logger) + eventBus, err := createAndStartEventBus(ctx, logger) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } - indexerService, eventSinks, err := createAndStartIndexerService(config, dbProvider, eventBus, logger, genDoc.ChainID) + indexerService, eventSinks, err := createAndStartIndexerService( + ctx, cfg, dbProvider, eventBus, + logger, genDoc.ChainID, nodeMetrics.indexer) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } // If an address is provided, listen on the socket for a connection from an // external signing process. 
- if config.PrivValidator.ListenAddr != "" { - protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr) + if cfg.PrivValidator.ListenAddr != "" { + protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr) // FIXME: we should start services inside OnStart switch protocol { case "grpc": - privValidator, err = createAndStartPrivValidatorGRPCClient(config, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorGRPCClient(ctx, cfg, genDoc.ChainID, logger) if err != nil { - return nil, fmt.Errorf("error with private validator grpc client: %w", err) + return nil, combineCloseError( + fmt.Errorf("error with private validator grpc client: %w", err), + makeCloser(closers)) } default: - privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidator.ListenAddr, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorSocketClient( + ctx, + cfg.PrivValidator.ListenAddr, + genDoc.ChainID, + logger, + ) if err != nil { - return nil, fmt.Errorf("error with private validator socket client: %w", err) + return nil, combineCloseError( + fmt.Errorf("error with private validator socket client: %w", err), + makeCloser(closers)) } } } var pubKey crypto.PubKey - if config.Mode == cfg.ModeValidator { - pubKey, err = privValidator.GetPubKey(context.TODO()) + if cfg.Mode == config.ModeValidator { + pubKey, err = privValidator.GetPubKey(ctx) if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + return nil, combineCloseError(fmt.Errorf("can't get pubkey: %w", err), + makeCloser(closers)) + } if pubKey == nil { - return nil, errors.New("could not retrieve public key from private validator") + return nil, combineCloseError( + errors.New("could not retrieve public key from private validator"), + makeCloser(closers)) } } // Determine whether we should attempt state sync. - stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + stateSync := cfg.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) if stateSync && state.LastBlockHeight > 0 { logger.Info("Found local state with non-zero height, skipping state sync") stateSync = false @@ -203,10 +240,12 @@ func makeNode(config *cfg.Config, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { - return nil, err + if err := consensus.NewHandshaker( + logger.With("module", "handshaker"), + stateStore, state, blockStore, eventBus, genDoc, + ).Handshake(ctx, proxyApp); err != nil { + return nil, combineCloseError(err, makeCloser(closers)) } // Reload the state. It will have the Version.Consensus.App set by the @@ -214,51 +253,53 @@ func makeNode(config *cfg.Config, // what happened during block replay). state, err = stateStore.Load() if err != nil { - return nil, fmt.Errorf("cannot load state: %w", err) + return nil, combineCloseError( + fmt.Errorf("cannot load state: %w", err), + makeCloser(closers)) } } // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. 
- blockSync := config.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey) + blockSync := !onlyValidatorIsUs(state, pubKey) - logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode) + logNodeStartupInfo(state, pubKey, logger, cfg.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. // TODO: Use a persistent peer database. - nodeInfo, err := makeNodeInfo(config, nodeKey, eventSinks, genDoc, state) + nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, config) - - peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + closers = append(closers, peerCloser) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + makeCloser(closers)) } - csMetrics, p2pMetrics, memplMetrics, smMetrics := defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID) - - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(config, proxyApp)) + router, err := createRouter(logger, nodeMetrics.p2p, nodeInfo, nodeKey, + peerManager, cfg, proxyApp) if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + makeCloser(closers)) } - mpReactorShim, mpReactor, mp, err := createMempoolReactor( - config, proxyApp, state, memplMetrics, peerManager, router, logger, + mpReactor, mp, err := createMempoolReactor( + cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } - evReactorShim, evReactor, evPool, err := createEvidenceReactor( - config, dbProvider, stateDB, blockStore, peerManager, router, logger, + evReactor, evPool, err := createEvidenceReactor( + cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, ) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } // make block executor for consensus and blockchain reactors to execute blocks @@ -269,160 +310,87 @@ func makeNode(config *cfg.Config, mp, evPool, blockStore, - sm.BlockExecutorWithMetrics(smMetrics), + sm.BlockExecutorWithMetrics(nodeMetrics.state), ) - csReactorShim, csReactor, csState := createConsensusReactor( - config, state, blockExec, blockStore, mp, evPool, - privValidator, csMetrics, stateSync || blockSync, eventBus, - peerManager, router, consensusLogger, + csReactor, csState, err := createConsensusReactor( + cfg, state, blockExec, blockStore, mp, evPool, + privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, + peerManager, router, logger, ) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. 
- bcReactorShim, bcReactor, err := createBlockchainReactor( - logger, config, state, blockExec, blockStore, csReactor, - peerManager, router, blockSync && !stateSync, csMetrics, + bcReactor, err := createBlockchainReactor( + logger, state, blockExec, blockStore, csReactor, + peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, ) if err != nil { - return nil, fmt.Errorf("could not create blockchain reactor: %w", err) - } - - // TODO: Remove this once the switch is removed. - var bcReactorForSwitch p2p.Reactor - if bcReactorShim != nil { - bcReactorForSwitch = bcReactorShim - } else { - bcReactorForSwitch = bcReactor.(p2p.Reactor) + return nil, combineCloseError( + fmt.Errorf("could not create blockchain reactor: %w", err), + makeCloser(closers)) } // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { - csMetrics.StateSyncing.Set(1) + nodeMetrics.consensus.StateSyncing.Set(1) } else if blockSync { - csMetrics.BlockSyncing.Set(1) + nodeMetrics.consensus.BlockSyncing.Set(1) } // Set up state sync reactor, and schedule a sync if requested. // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 - var ( - stateSyncReactor *statesync.Reactor - stateSyncReactorShim *p2p.ReactorShim + ssChDesc := statesync.GetChannelDescriptors() + channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc)) + for idx := range ssChDesc { + chd := ssChDesc[idx] + ch, err := router.OpenChannel(chd) + if err != nil { + return nil, err + } - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - - if config.P2P.UseLegacy { - channels = getChannelsFromShim(stateSyncReactorShim) - peerUpdates = stateSyncReactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() + channels[ch.ID] = ch } - stateSyncReactor = statesync.NewReactor( + stateSyncReactor := statesync.NewReactor( genDoc.ChainID, genDoc.InitialHeight, - *config.StateSync, - stateSyncReactorShim.Logger, + *cfg.StateSync, + logger.With("module", "statesync"), proxyApp.Snapshot(), proxyApp.Query(), channels[statesync.SnapshotChannel], channels[statesync.ChunkChannel], channels[statesync.LightBlockChannel], channels[statesync.ParamsChannel], - peerUpdates, + peerManager.Subscribe(), stateStore, blockStore, - config.StateSync.TempDir, + cfg.StateSync.TempDir, + nodeMetrics.statesync, ) - // add the channel descriptors to both the transports - // FIXME: This should be removed when the legacy p2p stack is removed and - // transports can either be agnostic to channel descriptors or can be - // declared in the constructor. 
- transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels()) - transport.AddChannelDescriptors(csReactorShim.GetChannels()) - transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels()) - - // Optionally, start the pex reactor - // - // TODO: - // - // We need to set Seeds and PersistentPeers on the switch, - // since it needs to be able to use these (and their DNS names) - // even if the PEX is off. We can include the DNS name in the NetAddress, - // but it would still be nice to have a clear list of the current "PersistentPeers" - // somewhere that we can return with net_info. - // - // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. - // Note we currently use the addrBook regardless at least for AddOurAddress - - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) - - pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - - if config.P2P.UseLegacy { - // setup Transport and Switch - sw = createSwitch( - config, transport, p2pMetrics, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + var pexReactor service.Service + if cfg.P2P.PexReactor { + pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) + return nil, combineCloseError(err, makeCloser(closers)) } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) - } else { - addrBook = nil - pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) - if err != nil { - return nil, err - } - } - - if config.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) - }() } node := &nodeImpl{ - config: config, + config: cfg, genesisDoc: genDoc, privValidator: privValidator, - transport: transport, - sw: sw, peerManager: peerManager, router: router, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, @@ -438,6 +406,9 @@ func makeNode(config *cfg.Config, evidenceReactor: evReactor, indexerService: indexerService, eventBus: eventBus, + eventSinks: eventSinks, + + shutdownOps: makeCloser(closers), rpcEnv: &rpccore.Environment{ ProxyAppQuery: proxyApp.Query(), @@ -449,9 +420,8 @@ func makeNode(config *cfg.Config, ConsensusState: csState, ConsensusReactor: csReactor, - BlockSyncReactor: bcReactor.(cs.BlockSyncReactor), + BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), - P2PPeers: sw, PeerManager: peerManager, GenDoc: genDoc, @@ -459,21 +429,10 @@ func makeNode(config *cfg.Config, EventBus: eventBus, Mempool: mp, Logger: logger.With("module", "rpc"), - Config: *config.RPC, + Config: *cfg.RPC, }, } - // this is a 
terrible, because typed nil interfaces are not == - // nil, so this is just cleanup to avoid having a non-nil - // value in the RPC environment that has the semantic - // properties of nil. - if sw == nil { - node.rpcEnv.P2PPeers = nil - } else if peerManager == nil { - node.rpcEnv.PeerManager = nil - } - // end hack - node.rpcEnv.P2PTransport = node node.BaseService = *service.NewBaseService(logger, "Node", node) @@ -482,12 +441,15 @@ func makeNode(config *cfg.Config, } // makeSeedNode returns a new seed node, containing only p2p, pex reactor -func makeSeedNode(config *cfg.Config, - dbProvider cfg.DBProvider, +func makeSeedNode(cfg *config.Config, + dbProvider config.DBProvider, nodeKey types.NodeKey, genesisDocProvider genesisDocProvider, logger log.Logger, ) (service.Service, error) { + if !cfg.P2P.PexReactor { + return nil, errors.New("cannot run seed nodes with PEX disabled") + } genDoc, err := genesisDocProvider() if err != nil { @@ -499,88 +461,45 @@ func makeSeedNode(config *cfg.Config, return nil, err } - nodeInfo, err := makeSeedNodeInfo(config, nodeKey, genDoc, state) + nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) if err != nil { return nil, err } // Setup Transport and Switch. - p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, config) + p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + closer) } - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(config, nil)) + router, err := createRouter(logger, p2pMetrics, nodeInfo, nodeKey, + peerManager, cfg, nil) if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + closer) } - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) - - // add the pex reactor - // FIXME: we add channel descriptors to both the router and the transport but only the router - // should be aware of channel info. We should remove this from transport once the legacy - // p2p stack is removed. 
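(Sketch: the seed-node constructor above threads a closer through each failure path so that partially constructed resources are torn down before the error is returned. The program below is illustrative only; combineCloseError itself is added in node/setup.go further down in this diff, and the error values here are made up.)

package main

import (
	"errors"
	"fmt"
)

type closer func() error

// combineCloseError mirrors the helper added in node/setup.go: run the
// cleanup closure and, if it also fails, fold both errors into one.
func combineCloseError(err error, cl closer) error {
	if err == nil {
		return cl()
	}
	clerr := cl()
	if clerr == nil {
		return err
	}
	return fmt.Errorf("error=%q closerError=%q", err.Error(), clerr.Error())
}

func main() {
	cleanup := closer(func() error { return errors.New("close peer store") })
	// A failed construction step still releases what was already built.
	fmt.Println(combineCloseError(errors.New("failed to create router"), cleanup))
}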
- pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - - if config.P2P.UseLegacy { - sw = createSwitch( - config, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) - } else { - pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) - if err != nil { - return nil, err - } - } - - if config.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) - }() + pexReactor, err := createPEXReactor(logger, peerManager, router) + if err != nil { + return nil, combineCloseError(err, closer) } node := &nodeImpl{ - config: config, + config: cfg, genesisDoc: genDoc, - transport: transport, - sw: sw, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, peerManager: peerManager, router: router, + shutdownOps: closer, + pexReactor: pexReactor, } node.BaseService = *service.NewBaseService(logger, "SeedNode", node) @@ -589,7 +508,17 @@ func makeSeedNode(config *cfg.Config, } // OnStart starts the Node. It implements service.Service. -func (n *nodeImpl) OnStart() error { +func (n *nodeImpl) OnStart(ctx context.Context) error { + if n.config.RPC.PprofListenAddress != "" { + // this service is not cleaned up (I believe that we'd + // need to have another thread and potentially a + // context to get this functionality.) + go func() { + n.Logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) + n.Logger.Error("pprof server error", "err", http.ListenAndServe(n.config.RPC.PprofListenAddress, nil)) + }() + } + now := tmtime.Now() genTime := n.genesisDoc.GenesisTime if genTime.After(now) { @@ -599,8 +528,8 @@ func (n *nodeImpl) OnStart() error { // Start the RPC server before the P2P server // so we can eg. receive txs for the first block - if n.config.RPC.ListenAddress != "" && n.config.Mode != cfg.ModeSeed { - listeners, err := n.startRPC() + if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed { + listeners, err := n.startRPC(ctx) if err != nil { return err } @@ -613,70 +542,48 @@ func (n *nodeImpl) OnStart() error { } // Start the transport.
- addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) - if err != nil { + if err := n.router.Start(ctx); err != nil { return err } - if err := n.transport.Listen(p2p.NewEndpoint(addr)); err != nil { - return err - } - n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy) - if n.config.P2P.UseLegacy { - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - if err = n.sw.Start(); err != nil { + if n.config.Mode != config.ModeSeed { + if err := n.bcReactor.Start(ctx); err != nil { return err } - } else if err = n.router.Start(); err != nil { - return err - } - - if n.config.Mode != cfg.ModeSeed { - if n.config.BlockSync.Version == cfg.BlockSyncV0 { - if err := n.bcReactor.Start(); err != nil { - return err - } - } // Start the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Start(); err != nil { + if err := n.consensusReactor.Start(ctx); err != nil { return err } // Start the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Start(); err != nil { + if err := n.stateSyncReactor.Start(ctx); err != nil { return err } // Start the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Start(); err != nil { + if err := n.mempoolReactor.Start(ctx); err != nil { return err } // Start the real evidence reactor separately since the switch uses the shim. - if err := n.evidenceReactor.Start(); err != nil { + if err := n.evidenceReactor.Start(ctx); err != nil { return err } } - if n.config.P2P.UseLegacy { - // Always connect to persistent peers - err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) + if n.config.P2P.PexReactor { + if err := n.pexReactor.Start(ctx); err != nil { + return err } - } else if err := n.pexReactor.Start(); err != nil { - return err - } // Run state sync // TODO: We shouldn't run state sync if we already have state that has a // LastBlockHeight that is not InitialHeight if n.stateSync { - bcR, ok := n.bcReactor.(cs.BlockSyncReactor) + bcR, ok := n.bcReactor.(consensus.BlockSyncReactor) if !ok { return fmt.Errorf("this blockchain reactor does not support switching from state sync") } @@ -700,7 +607,7 @@ func (n *nodeImpl) OnStart() error { // bubbling up the error and gracefully shutting down the rest of the node go func() { n.Logger.Info("starting state sync") - state, err := n.stateSyncReactor.Sync(context.TODO()) + state, err := n.stateSyncReactor.Sync(ctx) if err != nil { n.Logger.Error("state sync failed; shutting down this node", "err", err) // stop the node @@ -712,29 +619,32 @@ func (n *nodeImpl) OnStart() error { n.consensusReactor.SetStateSyncingMetrics(0) - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { + if err := n.eventBus.PublishEventStateSyncStatus( + types.EventDataStateSyncStatus{ + Complete: true, + Height: state.LastBlockHeight, + }); err != nil { + n.eventBus.Logger.Error("failed to emit the state sync status event", "err", err) } // TODO: Some form of orchestrator is needed here between the state // advancing reactors to be able to control which one of the three // is running - if n.config.BlockSync.Enable
{ - // FIXME Very ugly to have these metrics bleed through here. - n.consensusReactor.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(state); err != nil { - n.Logger.Error("failed to switch to block sync", "err", err) - return - } + // FIXME Very ugly to have these metrics bleed through here. + n.consensusReactor.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(ctx, state); err != nil { + n.Logger.Error("failed to switch to block sync", "err", err) + return + } - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) - } + if err := n.eventBus.PublishEventBlockSyncStatus( + types.EventDataBlockSyncStatus{ + Complete: false, + Height: state.LastBlockHeight, + }); err != nil { - } else { - n.consensusReactor.SwitchToConsensus(state, true) + n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) } }() } @@ -744,65 +654,30 @@ func (n *nodeImpl) OnStart() error { // OnStop stops the Node. It implements service.Service. func (n *nodeImpl) OnStop() { - n.Logger.Info("Stopping Node") - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) + if n.eventBus != nil { + n.eventBus.Wait() } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) + if n.indexerService != nil { + n.indexerService.Wait() } - if n.config.Mode != cfg.ModeSeed { - // now stop the reactors - if n.config.BlockSync.Version == cfg.BlockSyncV0 { - // Stop the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the blockchain reactor", "err", err) - } - } - - // Stop the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the consensus reactor", "err", err) - } - - // Stop the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the state sync reactor", "err", err) - } - - // Stop the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the mempool reactor", "err", err) - } - - // Stop the real evidence reactor separately since the switch uses the shim. 
- if err := n.evidenceReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the evidence reactor", "err", err) + for _, es := range n.eventSinks { + if err := es.Stop(); err != nil { + n.Logger.Error("failed to stop event sink", "err", err) } } - if err := n.pexReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) + if n.config.Mode != config.ModeSeed { + n.bcReactor.Wait() + n.consensusReactor.Wait() + n.stateSyncReactor.Wait() + n.mempoolReactor.Wait() + n.evidenceReactor.Wait() } - - if n.config.P2P.UseLegacy { - if err := n.sw.Stop(); err != nil { - n.Logger.Error("failed to stop switch", "err", err) - } - } else { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } - } - - if err := n.transport.Close(); err != nil { - n.Logger.Error("Error closing transport", "err", err) - } - + n.pexReactor.Wait() + n.router.Wait() n.isListening = false // finally stop the listeners / external services @@ -814,9 +689,7 @@ func (n *nodeImpl) OnStop() { } if pvsc, ok := n.privValidator.(service.Service); ok { - if err := pvsc.Stop(); err != nil { - n.Logger.Error("Error closing private validator", "err", err) - } + pvsc.Wait() } if n.prometheusSrv != nil { @@ -824,12 +697,18 @@ func (n *nodeImpl) OnStop() { // Error from closing listeners, or context timeout: n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) } + + } + if err := n.shutdownOps(); err != nil { + if strings.TrimSpace(err.Error()) != "" { + n.Logger.Error("problem shutting down additional services", "err", err) + } } } -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - if n.config.Mode == cfg.ModeValidator { - pubKey, err := n.privValidator.GetPubKey(context.TODO()) +func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) { + if n.config.Mode == config.ModeValidator { + pubKey, err := n.privValidator.GetPubKey(ctx) if pubKey == nil || err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -846,15 +725,15 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { n.rpcEnv.AddUnsafe(routes) } - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. 
// See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } // we may expose the rpc over both a unix and tcp socket @@ -870,14 +749,14 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } }), - rpcserver.ReadLimit(config.MaxBodyBytes), + rpcserver.ReadLimit(cfg.MaxBodyBytes), ) wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, - config.MaxOpenConnections, + cfg.MaxOpenConnections, ) if err != nil { return nil, err @@ -900,7 +779,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { n.config.RPC.CertFile(), n.config.RPC.KeyFile(), rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server with TLS", "err", err) } @@ -911,7 +790,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listener, rootHandler, rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server", "err", err) } @@ -921,35 +800,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listeners[i] = listener } - // we expose a simplified api over grpc for convenience to app devs - grpcListenAddr := n.config.RPC.GRPCListenAddress - if grpcListenAddr != "" { - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections - config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections) - if err != nil { - return nil, err - } - go func() { - if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil { - n.Logger.Error("Error starting gRPC server", "err", err) - } - }() - listeners = append(listeners, listener) - - } - return listeners, nil - } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics @@ -974,7 +825,7 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { } // ConsensusReactor returns the Node's ConsensusReactor. -func (n *nodeImpl) ConsensusReactor() *cs.Reactor { +func (n *nodeImpl) ConsensusReactor() *consensus.Reactor { return n.consensusReactor } @@ -984,7 +835,7 @@ func (n *nodeImpl) Mempool() mempool.Mempool { } // EventBus returns the Node's EventBus. -func (n *nodeImpl) EventBus() *types.EventBus { +func (n *nodeImpl) EventBus() *eventbus.EventBus { return n.eventBus } @@ -1028,26 +879,49 @@ type genesisDocProvider func() (*types.GenesisDoc, error) // defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads // the GenesisDoc from the config.GenesisFile() on the filesystem. 
-func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider { +func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider { return func() (*types.GenesisDoc, error) { - return types.GenesisDocFromFile(config.GenesisFile()) + return types.GenesisDocFromFile(cfg.GenesisFile()) } } -// metricsProvider returns a consensus, p2p and mempool Metrics. -type metricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) +type nodeMetrics struct { + consensus *consensus.Metrics + indexer *indexer.Metrics + mempool *mempool.Metrics + p2p *p2p.Metrics + proxy *proxy.Metrics + state *sm.Metrics + statesync *statesync.Metrics +} + +// metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. +type metricsProvider func(chainID string) *nodeMetrics // defaultMetricsProvider returns Metrics built using the Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. -func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) { - if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), - mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) +func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { + return func(chainID string) *nodeMetrics { + if cfg.Prometheus { + return &nodeMetrics{ + consensus: consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + indexer: indexer.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + mempool: mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + p2p: p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy: proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + state: sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + statesync: statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + } + } + return &nodeMetrics{ + consensus: consensus.NopMetrics(), + indexer: indexer.NopMetrics(), + mempool: mempool.NopMetrics(), + p2p: p2p.NopMetrics(), + proxy: proxy.NopMetrics(), + state: sm.NopMetrics(), + statesync: statesync.NopMetrics(), } - return cs.NopMetrics(), p2p.NopMetrics(), mempool.NopMetrics(), sm.NopMetrics() } } @@ -1079,8 +953,8 @@ func loadStateFromDBOrGenesisDocProvider( } func createAndStartPrivValidatorSocketClient( - listenAddr, - chainID string, + ctx context.Context, + listenAddr, chainID string, logger log.Logger, ) (types.PrivValidator, error) { @@ -1089,13 +963,13 @@ func createAndStartPrivValidatorSocketClient( return nil, fmt.Errorf("failed to start private validator: %w", err) } - pvsc, err := privval.NewSignerClient(pve, chainID) + pvsc, err := privval.NewSignerClient(ctx, pve, chainID) if err != nil { return nil, fmt.Errorf("failed to start private validator: %w", err) } // try to get a pubkey from the private validator the first time - _, err = pvsc.GetPubKey(context.TODO()) + _, err = pvsc.GetPubKey(ctx) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -1110,22 +984,23 @@ } func createAndStartPrivValidatorGRPCClient( - config *cfg.Config, + ctx context.Context, + cfg *config.Config, chainID string, logger log.Logger, ) (types.PrivValidator, error) { pvsc, err := tmgrpc.DialRemoteSigner( - config.PrivValidator, +
cfg.PrivValidator, chainID, logger, - config.Instrumentation.Prometheus, + cfg.Instrumentation.Prometheus, ) if err != nil { return nil, fmt.Errorf("failed to start private validator: %w", err) } // try to get a pubkey from the private validator the first time - _, err = pvsc.GetPubKey(context.TODO()) + _, err = pvsc.GetPubKey(ctx) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -1133,18 +1008,14 @@ return pvsc, nil } -func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOptions { +func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions { opts := p2p.RouterOptions{ QueueType: conf.P2P.QueueType, } - if conf.P2P.MaxNumInboundPeers > 0 { - opts.MaxIncomingConnectionAttempts = conf.P2P.MaxIncomingConnectionAttempts - } - if conf.FilterPeers && proxyApp != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ + res, err := proxyApp.Query().QuerySync(ctx, abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", id), }) if err != nil { @@ -1175,31 +1046,3 @@ func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOption return opts } - -// FIXME: Temporary helper function, shims should be removed. -func makeChannelsFromShims( - router *p2p.Router, - chShims map[p2p.ChannelID]*p2p.ChannelDescriptorShim, -) map[p2p.ChannelID]*p2p.Channel { - - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID, chShim := range chShims { - ch, err := router.OpenChannel(*chShim.Descriptor, chShim.MsgType, chShim.Descriptor.RecvBufferCapacity) - if err != nil { - panic(fmt.Sprintf("failed to open channel %v: %v", chID, err)) - } - - channels[chID] = ch - } - - return channels -} - -func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel { - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID := range reactorShim.Channels { - channels[chID] = reactorShim.GetChannel(chID) - } - - return channels -} diff --git a/node/node_test.go b/node/node_test.go index 297862366..ac4507df8 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -7,115 +7,142 @@ import ( "math" "net" "os" - "syscall" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm
"github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) func TestNodeStartStop(t *testing.T) { - config := cfg.ResetTestRoot("node_node_test") + cfg, err := config.ResetTestRoot("node_node_test") + require.NoError(t, err) - defer os.RemoveAll(config.RootDir) + defer os.RemoveAll(cfg.RootDir) + + ctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() // create & start node - ns, err := newDefaultNode(config, log.TestingLogger()) + ns, err := newDefaultNode(ctx, cfg, log.TestingLogger()) require.NoError(t, err) - require.NoError(t, ns.Start()) + require.NoError(t, ns.Start(ctx)) + + t.Cleanup(func() { + if ns.IsRunning() { + bcancel() + ns.Wait() + } + }) n, ok := ns.(*nodeImpl) require.True(t, ok) // wait for the node to produce a block - blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) + blocksSub, err := n.EventBus().SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "node_test", + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-blocksSub.Out(): - case <-blocksSub.Canceled(): - t.Fatal("blocksSub was canceled") - case <-time.After(10 * time.Second): - t.Fatal("timed out waiting for the node to produce a block") + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + if _, err := blocksSub.Next(tctx); err != nil { + t.Fatalf("Waiting for event: %v", err) } // stop the node go func() { - err = n.Stop() - require.NoError(t, err) + bcancel() + n.Wait() }() select { case <-n.Quit(): - case <-time.After(5 * time.Second): - pid := os.Getpid() - p, err := os.FindProcess(pid) - if err != nil { - panic(err) + return + case <-time.After(10 * time.Second): + if n.IsRunning() { + t.Fatal("timed out waiting for shutdown") } - err = p.Signal(syscall.SIGABRT) - fmt.Println(err) - t.Fatal("timed out waiting for shutdown") + } } -func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl { +func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { t.Helper() - ns, err := newDefaultNode(conf, logger) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ns, err := newDefaultNode(ctx, conf, logger) require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) + + t.Cleanup(func() { + cancel() + if n.IsRunning() { + ns.Wait() + } + }) + return n } func TestNodeDelayedStart(t *testing.T) { - config := cfg.ResetTestRoot("node_delayed_start_test") - defer os.RemoveAll(config.RootDir) + cfg, err := config.ResetTestRoot("node_delayed_start_test") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) now := tmtime.Now() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // create & start node - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) - require.NoError(t, n.Start()) - defer n.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, n.Start(ctx)) startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) } func TestNodeSetAppVersion(t *testing.T) { - config := cfg.ResetTestRoot("node_app_version_test") - defer os.RemoveAll(config.RootDir) + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() // create node - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) // default config uses the kvstore app - var appVersion uint64 = kvstore.ProtocolVersion + appVersion := kvstore.ProtocolVersion // check version is set in state state, err := n.stateStore.Load() @@ -129,9 +156,13 @@ func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = addr + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) dialerEndpoint := privval.NewSignerDialerEndpoint( @@ -142,41 +173,54 @@ func TestNodeSetPrivValTCP(t *testing.T) { signerServer := privval.NewSignerServer( dialerEndpoint, - config.ChainID(), + cfg.ChainID(), types.NewMockPV(), ) go func() { - err := signerServer.Start() + err := signerServer.Start(ctx) if err != nil { panic(err) } }() defer signerServer.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } // address without a protocol must result in error func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addrNoPrefix := testFreeAddr(t) - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = addrNoPrefix + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = addrNoPrefix + n, err := newDefaultNode(ctx, cfg, log.TestingLogger()) - _, err := newDefaultNode(config, log.TestingLogger()) assert.Error(t, err) + + if n != nil && n.IsRunning() { + cancel() + n.Wait() + } } func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = "unix://" + tmpfile + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = "unix://" + tmpfile dialer := privval.DialUnixFn(tmpfile) dialerEndpoint := privval.NewSignerDialerEndpoint( @@ -187,16 +231,16 @@ func TestNodeSetPrivValIPC(t *testing.T) { pvsc := privval.NewSignerServer( dialerEndpoint, - config.ChainID(), + cfg.ChainID(), types.NewMockPV(), ) go func() { - err := pvsc.Start() + err := pvsc.Start(ctx) require.NoError(t, err) }() defer pvsc.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } @@ -212,18 +256,22 @@ func testFreeAddr(t *testing.T) string { // create a proposal block using real and full // mempool and evidence pool and validate it. 
func TestCreateProposalBlock(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() const height int64 = 1 - state, stateDB, privVals := state(1, height) + state, stateDB, privVals := state(t, 1, height) stateStore := sm.NewStore(stateDB) maxBytes := 16384 const partSize uint32 = 256 @@ -232,15 +280,12 @@ func TestCreateProposalBlock(t *testing.T) { state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) - mp := mempoolv0.NewCListMempool( - config.Mempool, + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // Make EvidencePool evidenceDB := dbm.NewMemDB() @@ -250,7 +295,7 @@ func TestCreateProposalBlock(t *testing.T) { // fill the evidence pool with more evidence // than can fit in a block - var currentBytes int64 = 0 + var currentBytes int64 for currentBytes <= maxEvidenceBytes { ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") currentBytes += int64(len(ev.Bytes())) @@ -267,7 +312,7 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -304,18 +349,22 @@ func TestCreateProposalBlock(t *testing.T) { } func TestMaxTxsProposalBlockSize(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() const height int64 = 1 - state, stateDB, _ := state(1, height) + state, stateDB, _ := state(t, 1, height) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 16384 @@ -324,20 +373,18 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( - config.Mempool, + + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - 
mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one tx just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) tx := tmrand.Bytes(txLength - 4) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) blockExec := sm.NewBlockExecutor( @@ -366,17 +413,20 @@ } func TestMaxProposalBlockSize(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() - state, stateDB, _ := state(types.MaxVotesCount, int64(1)) + state, stateDB, _ := state(t, types.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 @@ -384,26 +434,23 @@ proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( - config.Mempool, + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one tx just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) tx := tmrand.Bytes(txLength - 6) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) // now produce more txs than what a normal block can hold with 10 smaller txs // At the end of the test, only the single big tx should be added for i := 0; i < 10; i++ { tx := tmrand.Bytes(10) - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -481,92 +528,126 @@ } func TestNodeNewSeedNode(t *testing.T) { - config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") - config.Mode = cfg.ModeSeed - defer os.RemoveAll(config.RootDir) + cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test") + require.NoError(t, err) + cfg.Mode = config.ModeSeed + defer os.RemoveAll(cfg.RootDir) - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) require.NoError(t, err) - ns, err := makeSeedNode(config, - cfg.DefaultDBProvider, + ns, err := makeSeedNode(cfg, + config.DefaultDBProvider, nodeKey, -
defaultGenesisDocProviderFunc(cfg), log.TestingLogger(), ) + t.Cleanup(ns.Wait) + require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) - err = n.Start() + err = n.Start(ctx) require.NoError(t, err) - assert.True(t, n.pexReactor.IsRunning()) + + cancel() + n.Wait() + + assert.False(t, n.pexReactor.IsRunning()) } func TestNodeSetEventSink(t *testing.T) { - config := cfg.ResetTestRoot("node_app_version_test") - defer os.RemoveAll(config.RootDir) + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) + + defer os.RemoveAll(cfg.RootDir) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := log.TestingLogger() - setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink { - eventBus, err := createAndStartEventBus(logger) + setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { + eventBus, err := createAndStartEventBus(ctx, logger) + require.NoError(t, err) + t.Cleanup(eventBus.Wait) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + indexService, eventSinks, err := createAndStartIndexerService(ctx, cfg, + config.DefaultDBProvider, eventBus, logger, genDoc.ChainID, + indexer.NopMetrics()) require.NoError(t, err) - - indexService, eventSinks, err := createAndStartIndexerService(config, - cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) + t.Cleanup(indexService.Wait) return eventSinks } + cleanup := func(ns service.Service) func() { + return func() { + n, ok := ns.(*nodeImpl) + if !ok { + return + } + if n == nil { + return + } + if !n.IsRunning() { + return + } + cancel() + n.Wait() + } + } - eventSinks := setupTest(t, config) + eventSinks := setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, indexer.KV, eventSinks[0].Type()) - config.TxIndex.Indexer = []string{"null"} - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{"null"} + eventSinks = setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, indexer.NULL, eventSinks[0].Type()) - config.TxIndex.Indexer = []string{"null", "kv"} - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{"null", "kv"} + eventSinks = setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, indexer.NULL, eventSinks[0].Type()) - config.TxIndex.Indexer = []string{"kvv"} - ns, err := newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"kvv"} + ns, err := newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) - assert.Equal(t, errors.New("unsupported event sink type"), err) + assert.Contains(t, err.Error(), "unsupported event sink type") + t.Cleanup(cleanup(ns)) - config.TxIndex.Indexer = []string{} - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{} + eventSinks = setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, indexer.NULL, eventSinks[0].Type()) - config.TxIndex.Indexer = []string{"psql"} - ns, err = newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"psql"} + ns, err = newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) - assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) + assert.Contains(t, err.Error(), "the psql connection settings cannot be empty") + t.Cleanup(cleanup(ns)) var psqlConn = "test" - config.TxIndex.Indexer = []string{"psql"} - config.TxIndex.PsqlConn = psqlConn - eventSinks = 
setupTest(t, config) + cfg.TxIndex.Indexer = []string{"psql"} + cfg.TxIndex.PsqlConn = psqlConn + eventSinks = setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) - config.TxIndex.Indexer = []string{"psql", "kv"} - config.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{"psql", "kv"} + cfg.TxIndex.PsqlConn = psqlConn + eventSinks = setupTest(t, cfg) assert.Equal(t, 2, len(eventSinks)) // we use a map to filter out duplicated sinks, so the order is not guaranteed when appending sinks. @@ -577,9 +658,9 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - config.TxIndex.Indexer = []string{"kv", "psql"} - config.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{"kv", "psql"} + cfg.TxIndex.PsqlConn = psqlConn + eventSinks = setupTest(t, cfg) assert.Equal(t, 2, len(eventSinks)) if eventSinks[0].Type() == indexer.KV { @@ -590,20 +671,23 @@ } var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") - config.TxIndex.Indexer = []string{"psql", "kv", "Kv"} - config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"} + cfg.TxIndex.PsqlConn = psqlConn + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) - assert.Equal(t, e, err) + assert.Contains(t, err.Error(), e.Error()) + t.Cleanup(cleanup(ns)) - config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} - config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} + cfg.TxIndex.PsqlConn = psqlConn + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) - assert.Equal(t, e, err) + assert.Contains(t, err.Error(), e.Error()) + t.Cleanup(cleanup(ns)) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { +func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { + t.Helper() privVals := make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { @@ -624,17 +708,15 @@ // save validators to db for 2 heights stateDB := dbm.NewMemDB() + t.Cleanup(func() { require.NoError(t, stateDB.Close()) }) + stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privVals } @@ -648,14 +730,15 @@ func loadStatefromGenesis(t *testing.T) sm.State { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - config := cfg.ResetTestRoot("load_state_from_genesis") + cfg, err := config.ResetTestRoot("load_state_from_genesis") + require.NoError(t, err) loadedState, err := stateStore.Load() require.NoError(t, err) require.True(t, loadedState.IsEmpty()) valSet, _ := factory.ValidatorSet(0, 10) - genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) state, err := loadStateFromDBOrGenesisDocProvider( stateStore, diff --git a/node/public.go b/node/public.go
index 99a8226d0..87007bdfc 100644 --- a/node/public.go +++ b/node/public.go @@ -2,13 +2,14 @@ package node import ( + "context" "fmt" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -16,8 +17,12 @@ import ( // process that host their own process-local tendermint node. This is // equivalent to running tendermint in its own process communicating // with an external ABCI application. -func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) { - return newDefaultNode(conf, logger) +func NewDefault( + ctx context.Context, + conf *config.Config, + logger log.Logger, +) (service.Service, error) { + return newDefaultNode(ctx, conf, logger) } // New constructs a tendermint node. The ClientCreator makes it @@ -26,9 +31,11 @@ func New(conf *config.Config, logger log.Logger) (service.Service, error) // Genesis document: if the value is nil, the genesis document is read // from the file specified in the config, and otherwise the node uses // value of the final argument. -func New(conf *config.Config, +func New( + ctx context.Context, + conf *config.Config, logger log.Logger, - cf proxy.ClientCreator, + cf abciclient.Creator, gen *types.GenesisDoc, ) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile()) @@ -51,7 +58,9 @@ func New(conf *config.Config, return nil, err } - return makeNode(conf, + return makeNode( + ctx, + conf, pval, nodeKey, cf, diff --git a/node/setup.go b/node/setup.go index 1a7c1b3b2..6ca991484 100644 --- a/node/setup.go +++ b/node/setup.go @@ -5,110 +5,146 @@ import ( "context" "errors" "fmt" - "math" - "net" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port + "strings" "time" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" - bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/blocksync" + "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" - protop2p
"github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" + + _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port ) -func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { - var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config}) - if err != nil { - return - } - blockStore = store.NewBlockStore(blockStoreDB) +type closer func() error - stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config}) - return +func makeCloser(cs []closer) closer { + return func() error { + errs := make([]string, 0, len(cs)) + for _, cl := range cs { + if err := cl(); err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) >= 0 { + return errors.New(strings.Join(errs, "; ")) + } + return nil + } } -func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { +func convertCancelCloser(cancel context.CancelFunc) closer { + return func() error { cancel(); return nil } +} + +func combineCloseError(err error, cl closer) error { + if err == nil { + return cl() + } + + clerr := cl() + if clerr == nil { + return err + } + + return fmt.Errorf("error=%q closerError=%q", err.Error(), clerr.Error()) +} + +func initDBs( + cfg *config.Config, + dbProvider config.DBProvider, +) (*store.BlockStore, dbm.DB, closer, error) { + + blockStoreDB, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg}) + if err != nil { + return nil, nil, func() error { return nil }, err + } + closers := []closer{} + blockStore := store.NewBlockStore(blockStoreDB) + closers = append(closers, blockStoreDB.Close) + + stateDB, err := dbProvider(&config.DBContext{ID: "state", Config: cfg}) + if err != nil { + return nil, nil, makeCloser(closers), err + } + + closers = append(closers, stateDB.Close) + + return blockStore, stateDB, makeCloser(closers), nil +} + +func createAndStartProxyAppConns( + ctx context.Context, + clientCreator abciclient.Creator, + logger log.Logger, + metrics *proxy.Metrics, +) (proxy.AppConns, error) { + proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), metrics) + + if err := proxyApp.Start(ctx); err != nil { return nil, fmt.Errorf("error starting proxy app connections: %v", err) } + return proxyApp, nil } -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { +func createAndStartEventBus(ctx context.Context, logger log.Logger) (*eventbus.EventBus, error) { + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { return nil, err } return eventBus, nil } func createAndStartIndexerService( - config *cfg.Config, - dbProvider cfg.DBProvider, - eventBus *types.EventBus, + ctx context.Context, + cfg *config.Config, + dbProvider config.DBProvider, + eventBus *eventbus.EventBus, logger log.Logger, chainID string, + metrics 
*indexer.Metrics, ) (*indexer.Service, []indexer.EventSink, error) { - eventSinks, err := sink.EventSinksFromConfig(config, dbProvider, chainID) + eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID) if err != nil { return nil, nil, err } - indexerService := indexer.NewIndexerService(eventSinks, eventBus) - indexerService.SetLogger(logger.With("module", "txindex")) + indexerService := indexer.NewService(indexer.ServiceArgs{ + Sinks: eventSinks, + EventBus: eventBus, + Logger: logger.With("module", "txindex"), + Metrics: metrics, + }) - if err := indexerService.Start(); err != nil { + if err := indexerService.Start(ctx); err != nil { return nil, nil, err } return indexerService, eventSinks, nil } -func doHandshake( - stateStore sm.Store, - state sm.State, - blockStore sm.BlockStore, - genDoc *types.GenesisDoc, - eventBus types.BlockEventPublisher, - proxyApp proxy.AppConns, - consensusLogger log.Logger) error { - - handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) - } - return nil -} - -func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) { +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger, mode string) { // Log the version info. logger.Info("Version info", "tmVersion", version.TMVersion, @@ -124,17 +160,23 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL "state", state.Version.Consensus.Block, ) } - switch { - case mode == cfg.ModeFull: - consensusLogger.Info("This node is a fullnode") - case mode == cfg.ModeValidator: + + switch mode { + case config.ModeFull: + logger.Info("This node is a fullnode") + case config.ModeValidator: addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { - consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } else { - consensusLogger.Info("This node is a validator (NOT in the active validator set)", - "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator (NOT in the active validator set)", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } } } @@ -148,297 +190,215 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { } func createMempoolReactor( - config *cfg.Config, + cfg *config.Config, proxyApp proxy.AppConns, state sm.State, memplMetrics *mempool.Metrics, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { +) (service.Service, mempool.Mempool, error) { - logger = logger.With("module", "mempool", "version", config.Mempool.Version) - channelShims := mempoolv0.GetChannelShims(config.Mempool) - reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) + logger = logger.With("module", "mempool") - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates + ch, err := router.OpenChannel(mempool.GetChannelDescriptor(cfg.Mempool)) + if err != nil { + return nil, nil, err + } + + mp := mempool.NewTxMempool( + logger, + cfg.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempool.WithMetrics(memplMetrics), + mempool.WithPreCheck(sm.TxPreCheck(state)), + 
mempool.WithPostCheck(sm.TxPostCheck(state)), ) - if config.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, channelShims) - peerUpdates = peerManager.Subscribe() + reactor := mempool.NewReactor( + logger, + cfg.Mempool, + peerManager, + mp, + ch, + peerManager.Subscribe(), + ) + + if cfg.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() } - switch config.Mempool.Version { - case cfg.MempoolV0: - mp := mempoolv0.NewCListMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) - - reactor := mempoolv0.NewReactor( - logger, - config.Mempool, - peerManager, - mp, - channels[mempool.MempoolChannel], - peerUpdates, - ) - - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactorShim, reactor, mp, nil - - case cfg.MempoolV1: - mp := mempoolv1.NewTxMempool( - logger, - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - logger, - config.Mempool, - peerManager, - mp, - channels[mempool.MempoolChannel], - peerUpdates, - ) - - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactorShim, reactor, mp, nil - - default: - return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version) - } + return reactor, mp, nil } func createEvidenceReactor( - config *cfg.Config, - dbProvider cfg.DBProvider, + cfg *config.Config, + dbProvider config.DBProvider, stateDB dbm.DB, blockStore *store.BlockStore, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) { - evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config}) +) (*evidence.Reactor, *evidence.Pool, error) { + evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { - return nil, nil, nil, err + return nil, nil, err } logger = logger.With("module", "evidence") - reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims) evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore) if err != nil { - return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) + return nil, nil, fmt.Errorf("creating evidence pool: %w", err) } - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if config.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, evidence.ChannelShims) - peerUpdates = peerManager.Subscribe() + ch, err := router.OpenChannel(evidence.GetChannelDescriptor()) + if err != nil { + return nil, nil, fmt.Errorf("creating evidence channel: %w", err) } evidenceReactor := evidence.NewReactor( logger, - channels[evidence.EvidenceChannel], - peerUpdates, + ch, + peerManager.Subscribe(), evidencePool, ) - return reactorShim, evidenceReactor, evidencePool, nil + return evidenceReactor, evidencePool, nil } func createBlockchainReactor( logger log.Logger, - config *cfg.Config, state sm.State, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, - csReactor *cs.Reactor, + csReactor 
*consensus.Reactor, peerManager *p2p.PeerManager, router *p2p.Router, blockSync bool, - metrics *cs.Metrics, -) (*p2p.ReactorShim, service.Service, error) { + metrics *consensus.Metrics, +) (service.Service, error) { logger = logger.With("module", "blockchain") - switch config.BlockSync.Version { - case cfg.BlockSyncV0: - reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if config.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - reactor, err := bcv0.NewReactor( - logger, state.Copy(), blockExec, blockStore, csReactor, - channels[bcv0.BlockSyncChannel], peerUpdates, blockSync, - metrics, - ) - if err != nil { - return nil, nil, err - } - - return reactorShim, reactor, nil - - case cfg.BlockSyncV2: - return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0") - - default: - return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version) + ch, err := router.OpenChannel(blocksync.GetChannelDescriptor()) + if err != nil { + return nil, err } + + peerUpdates := peerManager.Subscribe() + + reactor, err := blocksync.NewReactor( + logger, state.Copy(), blockExec, blockStore, csReactor, + ch, peerUpdates, blockSync, + metrics, + ) + if err != nil { + return nil, err + } + + return reactor, nil } func createConsensusReactor( - config *cfg.Config, + cfg *config.Config, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mp mempool.Mempool, evidencePool *evidence.Pool, privValidator types.PrivValidator, - csMetrics *cs.Metrics, + csMetrics *consensus.Metrics, waitSync bool, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *cs.Reactor, *cs.State) { +) (*consensus.Reactor, *consensus.State, error) { + logger = logger.With("module", "consensus") - consensusState := cs.NewState( - config.Consensus, + consensusState := consensus.NewState( + logger, + cfg.Consensus, state.Copy(), blockExec, blockStore, mp, evidencePool, - cs.StateMetrics(csMetrics), + consensus.StateMetrics(csMetrics), ) - consensusState.SetLogger(logger) - if privValidator != nil && config.Mode == cfg.ModeValidator { + + if privValidator != nil && cfg.Mode == config.ModeValidator { consensusState.SetPrivValidator(privValidator) } - reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims) + csChDesc := consensus.GetChannelDescriptors() + channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc)) + for idx := range csChDesc { + chd := csChDesc[idx] + ch, err := router.OpenChannel(chd) + if err != nil { + return nil, nil, err + } - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if config.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, cs.ChannelShims) - peerUpdates = peerManager.Subscribe() + channels[ch.ID] = ch } - reactor := cs.NewReactor( + reactor := consensus.NewReactor( logger, consensusState, - channels[cs.StateChannel], - channels[cs.DataChannel], - channels[cs.VoteChannel], - channels[cs.VoteSetBitsChannel], - peerUpdates, + channels[consensus.StateChannel], + channels[consensus.DataChannel], + 
channels[consensus.VoteChannel], + channels[consensus.VoteSetBitsChannel], + peerManager.Subscribe(), waitSync, - cs.ReactorMetrics(csMetrics), + consensus.ReactorMetrics(csMetrics), ) // Services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor. reactor.SetEventBus(eventBus) - return reactorShim, reactor, consensusState + return reactor, consensusState, nil } -func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport { +func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { + conf := conn.DefaultMConnConfig() + conf.FlushThrottle = cfg.P2P.FlushThrottleTimeout + conf.SendRate = cfg.P2P.SendRate + conf.RecvRate = cfg.P2P.RecvRate + conf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize + return p2p.NewMConnTransport( - logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{}, + logger, conf, []*p2p.ChannelDescriptor{}, p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers + - len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")), - ), + MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), }, ) } func createPeerManager( - config *cfg.Config, - dbProvider cfg.DBProvider, - p2pLogger log.Logger, + cfg *config.Config, + dbProvider config.DBProvider, nodeID types.NodeID, -) (*p2p.PeerManager, error) { +) (*p2p.PeerManager, closer, error) { + + privatePeerIDs := make(map[types.NodeID]struct{}) + for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { + privatePeerIDs[types.NodeID(id)] = struct{}{} + } var maxConns uint16 switch { - case config.P2P.MaxConnections > 0: - maxConns = config.P2P.MaxConnections - - case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0: - x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers - if x > math.MaxUint16 { - return nil, fmt.Errorf( - "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)", - config.P2P.MaxNumInboundPeers, - config.P2P.MaxNumOutboundPeers, - math.MaxUint16, - ) - } - - maxConns = uint16(x) - + case cfg.P2P.MaxConnections > 0: + maxConns = cfg.P2P.MaxConnections default: maxConns = 64 } - privatePeerIDs := make(map[types.NodeID]struct{}) - for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") { - privatePeerIDs[types.NodeID(id)] = struct{}{} - } - options := p2p.PeerManagerOptions{ MaxConnected: maxConns, MaxConnectedUpgrade: 4, @@ -451,218 +411,89 @@ func createPeerManager( } peers := []p2p.NodeAddress{} - for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { - return nil, fmt.Errorf("invalid peer address %q: %w", p, err) + return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err) } peers = append(peers, address) options.PersistentPeers = append(options.PersistentPeers, address.NodeID) } - for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { - return nil, fmt.Errorf("invalid peer address %q: %w", p, err) + return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err) } peers = append(peers, address) } - peerDB, err := 
dbProvider(&cfg.DBContext{ID: "peerstore", Config: config}) + peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg}) if err != nil { - return nil, err + return nil, func() error { return nil }, err } peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, peerDB.Close, fmt.Errorf("failed to create peer manager: %w", err) } for _, peer := range peers { if _, err := peerManager.Add(peer); err != nil { - return nil, fmt.Errorf("failed to add peer %q: %w", peer, err) + return nil, peerDB.Close, fmt.Errorf("failed to add peer %q: %w", peer, err) } } - return peerManager, nil + return peerManager, peerDB.Close, nil } func createRouter( - p2pLogger log.Logger, + logger log.Logger, p2pMetrics *p2p.Metrics, nodeInfo types.NodeInfo, - privKey crypto.PrivKey, + nodeKey types.NodeKey, peerManager *p2p.PeerManager, - transport p2p.Transport, - options p2p.RouterOptions, + conf *config.Config, + proxyApp proxy.AppConns, ) (*p2p.Router, error) { + p2pLogger := logger.With("module", "p2p") + transport := createTransport(p2pLogger, conf) + + ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(conf.P2P.ListenAddress)) + if err != nil { + return nil, err + } + return p2p.NewRouter( p2pLogger, p2pMetrics, nodeInfo, - privKey, + nodeKey.PrivKey, peerManager, []p2p.Transport{transport}, - options, + []p2p.Endpoint{ep}, + getRouterConfig(conf, proxyApp), ) } -func createSwitch( - config *cfg.Config, - transport p2p.Transport, - p2pMetrics *p2p.Metrics, - mempoolReactor *p2p.ReactorShim, - bcReactor p2p.Reactor, - stateSyncReactor *p2p.ReactorShim, - consensusReactor *p2p.ReactorShim, - evidenceReactor *p2p.ReactorShim, - proxyApp proxy.AppConns, - nodeInfo types.NodeInfo, - nodeKey types.NodeKey, - p2pLogger log.Logger, -) *p2p.Switch { - - var ( - connFilters = []p2p.ConnFilterFunc{} - peerFilters = []p2p.PeerFilterFunc{} - ) - - if !config.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) - } - - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - if config.FilterPeers { - connFilters = append( - connFilters, - // ABCI query for address filtering. - func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - - peerFilters = append( - peerFilters, - // ABCI query for ID filtering. 
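The createPeerManager signature above now returns a closer alongside the *p2p.PeerManager, so the caller can release the peer store on every path, including partial failures. A minimal sketch of that constructor-plus-closer pattern; the names here (store, openStore) are hypothetical stand-ins, not anything from this changeset:

```go
package main

import (
	"errors"
	"fmt"
)

// closer mirrors the shape used above: a cleanup hook returned alongside
// the constructed value so callers can release resources on any path.
type closer func() error

// store and openStore are hypothetical, for illustration only.
type store struct{ open bool }

func (s *store) Close() error {
	s.open = false
	return nil
}

// openStore returns the resource, a closer that is always safe to call,
// and an error. Early failures still return a no-op closer, so the call
// site can defer the closer unconditionally.
func openStore(fail bool) (*store, closer, error) {
	if fail {
		return nil, func() error { return nil }, errors.New("open failed")
	}
	s := &store{open: true}
	return s, s.Close, nil
}

func main() {
	s, closeFn, err := openStore(false)
	defer func() { _ = closeFn() }()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("store open:", s.open)
}
```

Returning a no-op closer on the early error paths keeps the call site uniform: the caller can defer the closer unconditionally instead of branching on how far construction got.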
- func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - } - - sw := p2p.NewSwitch( - config.P2P, - transport, - p2p.WithMetrics(p2pMetrics), - p2p.SwitchPeerFilters(peerFilters...), - p2p.SwitchConnFilters(connFilters...), - ) - - sw.SetLogger(p2pLogger) - if config.Mode != cfg.ModeSeed { - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) - sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) - } - - sw.SetNodeInfo(nodeInfo) - sw.SetNodeKey(nodeKey) - - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile()) - return sw -} - -func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { - - addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) - - // Add ourselves to addrbook to prevent dialing ourselves - if config.P2P.ExternalAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - if config.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - - sw.SetAddrBook(addrBook) - - return addrBook, nil -} - -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - - reactorConfig := &pex.ReactorConfig{ - Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "), - SeedMode: config.Mode == cfg.ModeSeed, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, - } - // TODO persistent peers ? 
so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, reactorConfig) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) - return pexReactor -} - -func createPEXReactorV2( - config *cfg.Config, +func createPEXReactor( logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, ) (service.Service, error) { - channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128) + channel, err := router.OpenChannel(pex.ChannelDescriptor()) if err != nil { return nil, err } - peerUpdates := peerManager.Subscribe() - return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil + return pex.NewReactor(logger, peerManager, channel, peerManager.Subscribe()), nil } func makeNodeInfo( - config *cfg.Config, + cfg *config.Config, nodeKey types.NodeKey, eventSinks []indexer.EventSink, genDoc *types.GenesisDoc, @@ -674,17 +505,7 @@ func makeNodeInfo( txIndexerStatus = "on" } - var bcChannel byte - switch config.BlockSync.Version { - case cfg.BlockSyncV0: - bcChannel = byte(bcv0.BlockSyncChannel) - - case cfg.BlockSyncV2: - bcChannel = bcv2.BlockchainChannel - - default: - return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version) - } + bcChannel := byte(blocksync.BlockSyncChannel) nodeInfo := types.NodeInfo{ ProtocolVersion: types.ProtocolVersion{ @@ -697,10 +518,10 @@ func makeNodeInfo( Version: version.TMVersion, Channels: []byte{ bcChannel, - byte(cs.StateChannel), - byte(cs.DataChannel), - byte(cs.VoteChannel), - byte(cs.VoteSetBitsChannel), + byte(consensus.StateChannel), + byte(consensus.DataChannel), + byte(consensus.VoteChannel), + byte(consensus.VoteSetBitsChannel), byte(mempool.MempoolChannel), byte(evidence.EvidenceChannel), byte(statesync.SnapshotChannel), @@ -708,21 +529,21 @@ func makeNodeInfo( byte(statesync.LightBlockChannel), byte(statesync.ParamsChannel), }, - Moniker: config.Moniker, + Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: txIndexerStatus, - RPCAddress: config.RPC.ListenAddress, + RPCAddress: cfg.RPC.ListenAddress, }, } - if config.P2P.PexReactor { + if cfg.P2P.PexReactor { nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := config.P2P.ExternalAddress + lAddr := cfg.P2P.ExternalAddress if lAddr == "" { - lAddr = config.P2P.ListenAddress + lAddr = cfg.P2P.ListenAddress } nodeInfo.ListenAddr = lAddr @@ -732,7 +553,7 @@ func makeNodeInfo( } func makeSeedNodeInfo( - config *cfg.Config, + cfg *config.Config, nodeKey types.NodeKey, genDoc *types.GenesisDoc, state sm.State, @@ -747,21 +568,21 @@ func makeSeedNodeInfo( Network: genDoc.ChainID, Version: version.TMVersion, Channels: []byte{}, - Moniker: config.Moniker, + Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: "off", - RPCAddress: config.RPC.ListenAddress, + RPCAddress: cfg.RPC.ListenAddress, }, } - if config.P2P.PexReactor { + if cfg.P2P.PexReactor { nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := config.P2P.ExternalAddress + lAddr := cfg.P2P.ExternalAddress if lAddr == "" { - lAddr = config.P2P.ListenAddress + lAddr = cfg.P2P.ListenAddress } nodeInfo.ListenAddr = lAddr diff --git a/privval/file.go b/privval/file.go index 4ec918c70..6a8484557 100644 --- a/privval/file.go +++ b/privval/file.go @@ -5,7 +5,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -200,7 +200,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) (*FilePV, error) { // If loadState is true, 
we load from the stateFilePath. Otherwise, we use an empty LastSignState. func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } @@ -218,7 +218,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { return nil, err } diff --git a/privval/file_test.go b/privval/file_test.go index 680428ac2..c4314a367 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/base64" "fmt" - "io/ioutil" "os" "testing" "time" @@ -24,9 +23,9 @@ import ( func TestGenLoadValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -44,9 +43,9 @@ func TestGenLoadValidator(t *testing.T) { } func TestResetValidator(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -76,9 +75,9 @@ func TestResetValidator(t *testing.T) { func TestLoadOrGenValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) tempKeyFilePath := tempKeyFile.Name() @@ -165,9 +164,9 @@ func TestUnmarshalValidatorKey(t *testing.T) { func TestSignVote(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -219,9 +218,9 @@ func TestSignVote(t *testing.T) { func TestSignProposal(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -268,9 +267,9 @@ func TestSignProposal(t *testing.T) { } func TestDifferByTimestamp(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + 
tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 77f3930aa..f4c0b7d99 100644 --- a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -7,7 +7,7 @@ import ( "google.golang.org/grpc/status" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -62,7 +62,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return nil, errStatus.Err() } - pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) + pk, err := encoding.PubKeyFromProto(resp.PubKey) if err != nil { return nil, err } diff --git a/privval/grpc/server.go b/privval/grpc/server.go index f5c434b1b..13e0c9073 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -7,7 +7,7 @@ import ( "google.golang.org/grpc/status" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -44,7 +44,7 @@ func (ss *SignerServer) GetPubKey(ctx context.Context, req *privvalproto.PubKeyR return nil, status.Errorf(codes.NotFound, "error getting pubkey: %v", err) } - pk, err := cryptoenc.PubKeyToProto(pubKey) + pk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, status.Errorf(codes.Internal, "error transitioning pubkey to proto: %v", err) } diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 62647542c..7e0483f9c 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -4,17 +4,16 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "os" "time" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" ) @@ -65,7 +64,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(ca) + bs, err := os.ReadFile(ca) if err != nil { log.Error("failed to read ca cert:", "error", err) os.Exit(1) @@ -88,15 +87,15 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { // DialRemoteSigner is a generalized function to dial the gRPC server. 
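DialRemoteSigner below chooses between TLS credentials and an insecure connection depending on whether security options are configured. A simplified, self-contained sketch of that branch, assuming placeholder names (dialSigner, addr, certFile); the real function additionally builds mutual TLS via GenerateTLS and attaches keepalive and retry options:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialSigner sketches the decision DialRemoteSigner makes: TLS credentials
// when certificate material is configured, otherwise an insecure connection
// (logged loudly). addr and certFile are placeholders, and server-side TLS
// here is a simplification of the mutual TLS the real code builds.
func dialSigner(ctx context.Context, addr, certFile string) (*grpc.ClientConn, error) {
	var transportSecurity grpc.DialOption
	if certFile != "" {
		creds, err := credentials.NewClientTLSFromFile(certFile, "")
		if err != nil {
			return nil, err
		}
		transportSecurity = grpc.WithTransportCredentials(creds)
	} else {
		transportSecurity = grpc.WithInsecure()
		log.Println("using an insecure gRPC connection")
	}
	return grpc.DialContext(ctx, addr, transportSecurity)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := dialSigner(ctx, "127.0.0.1:26659", "")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```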
func DialRemoteSigner( - config *cfg.PrivValidatorConfig, + cfg *config.PrivValidatorConfig, chainID string, logger log.Logger, usePrometheus bool, ) (*SignerClient, error) { var transportSecurity grpc.DialOption - if config.AreSecurityOptionsPresent() { - transportSecurity = GenerateTLS(config.ClientCertificateFile(), - config.ClientKeyFile(), config.RootCAFile(), logger) + if cfg.AreSecurityOptionsPresent() { + transportSecurity = GenerateTLS(cfg.ClientCertificateFile(), + cfg.ClientKeyFile(), cfg.RootCAFile(), logger) } else { transportSecurity = grpc.WithInsecure() logger.Info("Using an insecure gRPC connection!") @@ -110,8 +109,8 @@ func DialRemoteSigner( dialOptions = append(dialOptions, transportSecurity) - ctx := context.Background() - _, address := tmnet.ProtocolAndAddress(config.ListenAddr) + ctx := context.TODO() + _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr) conn, err := grpc.DialContext(ctx, address, dialOptions...) if err != nil { logger.Error("unable to connect to server", "target", address, "err", err) diff --git a/privval/msgs_test.go b/privval/msgs_test.go index bf532bd7b..7ac9f2c5d 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -60,7 +60,7 @@ func exampleProposal() *types.Proposal { // nolint:lll // ignore line length for tests func TestPrivvalVectors(t *testing.T) { pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() - ppk, err := cryptoenc.PubKeyToProto(pk) + ppk, err := encoding.PubKeyToProto(pk) require.NoError(t, err) // Generate a simple vote diff --git a/privval/secret_connection.go b/privval/secret_connection.go index 8847f91db..ffa5d36ed 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -23,7 +23,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" @@ -408,7 +408,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. 
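shareAuthSignature below sends our key/signature and receives the peer's in tandem via tendermint's internal libs/async.Parallel. A rough equivalent of that shape using golang.org/x/sync/errgroup instead, with fixed 32-byte payloads purely for illustration:

```go
package main

import (
	"fmt"
	"io"
	"net"

	"golang.org/x/sync/errgroup"
)

// exchange sketches the "send ours, receive theirs in tandem" shape:
// both directions run concurrently and the first error aborts the
// exchange. This is not the code the diff uses, only the same pattern.
func exchange(conn net.Conn, ours []byte) ([]byte, error) {
	theirs := make([]byte, 32)
	var g errgroup.Group
	g.Go(func() error {
		_, err := conn.Write(ours)
		return err
	})
	g.Go(func() error {
		_, err := io.ReadFull(conn, theirs)
		return err
	})
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return theirs, nil
}

func main() {
	a, b := net.Pipe()
	go func() {
		buf := make([]byte, 32)
		_, _ = io.ReadFull(b, buf)
		_, _ = b.Write(buf) // echo the peer's payload back
	}()
	got, err := exchange(a, []byte("0123456789abcdef0123456789abcdef"))
	fmt.Println(len(got), err)
}
```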
var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - pbpk, err := cryptoenc.PubKeyToProto(pubKey) + pbpk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, true, err } @@ -425,7 +425,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + pk, err := encoding.PubKeyFromProto(pba.PubKey) if err != nil { return nil, true, err // abort } diff --git a/privval/signer_client.go b/privval/signer_client.go index d25584c8f..ec6d95ca6 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -23,9 +23,9 @@ var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) -func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { +func NewSignerClient(ctx context.Context, endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(ctx); err != nil { return nil, fmt.Errorf("failed to start listener endpoint: %w", err) } } @@ -83,7 +83,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) + pk, err := encoding.PubKeyFromProto(resp.PubKey) if err != nil { return nil, err } diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 9aa49e709..f9272b004 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -23,370 +23,336 @@ type signerTestCase struct { mockPV types.PrivValidator signerClient *SignerClient signerServer *SignerServer + name string + closer context.CancelFunc } -func getSignerTestCases(t *testing.T) []signerTestCase { +func getSignerTestCases(ctx context.Context, t *testing.T) []signerTestCase { + t.Helper() + testCases := make([]signerTestCase, 0) // Get test cases for each possible dialer (DialTCP / DialUnix / etc) - for _, dtc := range getDialerTestCases(t) { + for idx, dtc := range getDialerTestCases(t) { chainID := tmrand.Str(12) mockPV := types.NewMockPV() + cctx, ccancel := context.WithCancel(ctx) // get a pair of signer listener, signer dialer endpoints - sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) - sc, err := NewSignerClient(sl, chainID) + sl, sd := getMockEndpoints(cctx, t, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(cctx, sl, chainID) require.NoError(t, err) ss := NewSignerServer(sd, chainID, mockPV) - err = ss.Start() - require.NoError(t, err) + require.NoError(t, ss.Start(cctx)) - tc := signerTestCase{ + testCases = append(testCases, signerTestCase{ + name: fmt.Sprintf("Case%d%T_%s", idx, dtc.dialer, chainID), + closer: ccancel, chainID: chainID, mockPV: mockPV, signerClient: sc, signerServer: ss, - } - - testCases = append(testCases, tc) + }) } return testCases } func TestSignerClose(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - err := 
tc.signerClient.Close() - assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - err = tc.signerServer.Stop() - assert.NoError(t, err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + assert.NoError(t, tc.signerClient.Close()) + assert.NoError(t, tc.signerServer.Stop()) + }) } } func TestSignerPing(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tc := range getSignerTestCases(ctx, t) { err := tc.signerClient.Ping() assert.NoError(t, err) } } func TestSignerGetPubKey(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + pubKey, err := tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + + assert.Equal(t, expectedPubKey, pubKey) + + pubKey, err = tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + expectedAddr := expectedpk.Address() + + assert.Equal(t, expectedAddr, pubKey.Address()) }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - pubKey, err := tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedPubKey, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) - - assert.Equal(t, expectedPubKey, pubKey) - - pubKey, err = tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedpk, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) - expectedAddr := expectedpk.Address() - - assert.Equal(t, expectedAddr, pubKey.Address()) } } func TestSignerProposal(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - want := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + have := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + want := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + 
POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } + + require.NoError(t, tc.mockPV.SignProposal(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignProposal(ctx, tc.chainID, have.ToProto())) + + assert.Equal(t, want.Signature, have.Signature) }) - require.NoError(t, tc.mockPV.SignProposal(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignProposal(context.Background(), tc.chainID, have.ToProto())) - - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVote(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + + assert.Equal(t, want.Signature, have.Signature) }) - - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVoteResetDeadline(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + 
for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } + + time.Sleep(testTimeoutReadWrite2o3) + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + assert.Equal(t, want.Signature, have.Signature) + + // TODO(jleni): Clarify what is actually being tested + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(testTimeoutReadWrite2o3) + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + assert.Equal(t, want.Signature, have.Signature) }) - - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) - - // TODO(jleni): Clarify what is actually being tested - - // This would exceed the deadline if it was not extended by the previous message - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVoteKeepAlive(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: 
tmproto.PrecommitType,
+				Height:           1,
+				Round:            2,
+				BlockID:          types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
+				Timestamp:        ts,
+				ValidatorAddress: valAddr,
+				ValidatorIndex:   1,
			}
+
+			// Check that the service is still available even if the
+			// client does not request a signature for a long time.
+
+			// In this particular case, we use the dialer-side (signer server)
+			// logger to ensure that test messages are properly interleaved
+			// in the test logs.
+			tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------")
+			time.Sleep(testTimeoutReadWrite * 3)
+			tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------")
+
+			require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto()))
+			require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto()))
+
+			assert.Equal(t, want.Signature, have.Signature)
		})
-
-		// Check that even if the client does not request a
-		// signature for a long time. The service is still available
-
-		// in this particular case, we use the dialer logger to ensure that
-		// test messages are properly interleaved in the test logs
-		tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------")
-		time.Sleep(testTimeoutReadWrite * 3)
-		tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------")
-
-		require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto()))
-		require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto()))
-
-		assert.Equal(t, want.Signature, have.Signature)
	}
}

func TestSignerSignProposalErrors(t *testing.T) {
-	for _, tc := range getSignerTestCases(t) {
-		// Replace service with a mock that always fails
-		tc.signerServer.privVal = types.NewErroringMockPV()
-		tc.mockPV = types.NewErroringMockPV()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

-		tc := tc
-		t.Cleanup(func() {
-			if err := tc.signerServer.Stop(); err != nil {
-				t.Error(err)
+	for _, tc := range getSignerTestCases(ctx, t) {
+		t.Run(tc.name, func(t *testing.T) {
+			defer tc.closer()
+			// Replace service with a mock that always fails
+			tc.signerServer.privVal = types.NewErroringMockPV()
+			tc.mockPV = types.NewErroringMockPV()
+
+			ts := time.Now()
+			hash := tmrand.Bytes(tmhash.Size)
+			proposal := &types.Proposal{
+				Type:      tmproto.ProposalType,
+				Height:    1,
+				Round:     2,
+				POLRound:  2,
+				BlockID:   types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
+				Timestamp: ts,
+				Signature: []byte("signature"),
+			}
+
+			err := tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto())
+			rserr, ok := err.(*RemoteSignerError)
+			require.True(t, ok, "%T", err)
+			require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error())
+
+			err = tc.mockPV.SignProposal(ctx, tc.chainID, proposal.ToProto())
+			require.Error(t, err)
+
+			err = tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto())
+			require.Error(t, err)
		})
-		t.Cleanup(func() {
-			if err := tc.signerClient.Close(); err != nil {
-				t.Error(err)
-			}
-		})
-
-		ts := time.Now()
-		hash := tmrand.Bytes(tmhash.Size)
-		proposal := &types.Proposal{
-			Type:      tmproto.ProposalType,
-			Height:    1,
-			Round:     2,
-			POLRound:  2,
-			BlockID:   types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
-			Timestamp: ts,
-			Signature: []byte("signature"),
-		}
-
-		err := tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto())
-
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = tc.mockPV.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) - - err = tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) } } func TestSignerSignVoteErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - vote := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - Signature: []byte("signature"), - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // Replace signer service privval with one that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Signature: []byte("signature"), } + + // Replace signer service privval with one that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + err := tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) + + err = tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - err := tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = tc.mockPV.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) - - err = tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) } } @@ -413,28 +379,23 @@ func brokenHandler(ctx context.Context, privVal types.PrivValidator, request pri } func TestSignerUnexpectedResponse(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc.signerServer.privVal = types.NewMockPV() - tc.mockPV = types.NewMockPV() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc.signerServer.SetRequestHandler(brokenHandler) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } + tc.signerServer.privVal = types.NewMockPV() + tc.mockPV = types.NewMockPV() + + tc.signerServer.SetRequestHandler(brokenHandler) + + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} + + e := tc.signerClient.SignVote(ctx, tc.chainID, want.ToProto()) + 
assert.EqualError(t, e, "empty response") }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} - - e := tc.signerClient.SignVote(context.Background(), tc.chainID, want.ToProto()) - assert.EqualError(t, e, "empty response") } } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 292e7a476..e2287c630 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "net" "time" @@ -63,7 +64,7 @@ func NewSignerListenerEndpoint( } // OnStart implements service.Service. -func (sl *SignerListenerEndpoint) OnStart() error { +func (sl *SignerListenerEndpoint) OnStart(ctx context.Context) error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index cbd45e6ce..b92e0abe5 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -1,6 +1,7 @@ package privval import ( + "context" "net" "testing" "time" @@ -38,6 +39,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { retries = 10 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -71,7 +75,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { mockPV := types.NewMockPV() signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - err = signerServer.Start() + err = signerServer.Start(ctx) require.NoError(t, err) t.Cleanup(func() { if err := signerServer.Stop(); err != nil { @@ -88,6 +92,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } func TestRetryConnToRemoteSigner(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tc := range getDialerTestCases(t) { var ( logger = log.TestingLogger() @@ -107,14 +114,9 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) - t.Cleanup(func() { - if err := listenerEndpoint.Stop(); err != nil { - t.Error(err) - } - }) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, signerServer.Start()) + require.NoError(t, signerServer.Start(ctx)) assert.True(t, signerServer.IsRunning()) <-endpointIsOpenCh if err := signerServer.Stop(); err != nil { @@ -128,13 +130,8 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) // let some pings pass - require.NoError(t, signerServer2.Start()) + require.NoError(t, signerServer2.Start(ctx)) assert.True(t, signerServer2.IsRunning()) - t.Cleanup(func() { - if err := signerServer2.Stop(); err != nil { - t.Error(err) - } - }) // give the client some time to re-establish the conn to the remote signer // should see sth like this in the logs: @@ -175,15 +172,23 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite ) } -func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { +func startListenerEndpointAsync( + ctx context.Context, + t *testing.T, + sle *SignerListenerEndpoint, + endpointIsOpenCh chan struct{}, +) { + t.Helper() + go func(sle *SignerListenerEndpoint) { - require.NoError(t, 
sle.Start()) + require.NoError(t, sle.Start(ctx)) assert.True(t, sle.IsRunning()) close(endpointIsOpenCh) }(sle) } func getMockEndpoints( + ctx context.Context, t *testing.T, addr string, socketDialer SocketDialer, @@ -204,9 +209,9 @@ func getMockEndpoints( SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, dialerEndpoint.Start()) + require.NoError(t, dialerEndpoint.Start(ctx)) assert.True(t, dialerEndpoint.IsRunning()) <-endpointIsOpenCh diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 18ad8a996..d07c65620 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -37,7 +37,7 @@ func DefaultValidationRequestHandler( if err != nil { return res, err } - pk, err := cryptoenc.PubKeyToProto(pubKey) + pk, err := encoding.PubKeyToProto(pubKey) if err != nil { return res, err } diff --git a/privval/signer_server.go b/privval/signer_server.go index 24bf67cc5..e31d3bdb4 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -42,8 +42,8 @@ func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal typ } // OnStart implements service.Service. 
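Start and OnStart now take a context, and service loops are expected to exit when it is canceled, as the SignerServer.serviceLoop change below shows. A minimal sketch of that loop shape, with doWork standing in for ensureConnection plus servicePendingRequest; the real loop also selects on the service's Quit() channel:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// serviceLoop checks for shutdown before each unit of work and stops on
// context cancellation or on the first work error, mirroring the shape
// of the serviceLoop rewrite below (minus the Quit() channel).
func serviceLoop(ctx context.Context, doWork func() error) {
	for {
		select {
		case <-ctx.Done():
			return
		default:
			if err := doWork(); err != nil {
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		defer close(done)
		serviceLoop(ctx, func() error {
			time.Sleep(10 * time.Millisecond)
			return nil
		})
	}()
	time.Sleep(50 * time.Millisecond)
	cancel()
	<-done
	fmt.Println("loop stopped")
}
```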
-func (ss *SignerServer) OnStart() error { - go ss.serviceLoop() +func (ss *SignerServer) OnStart(ctx context.Context) error { + go ss.serviceLoop(ctx) return nil } @@ -91,18 +91,18 @@ func (ss *SignerServer) servicePendingRequest() { } } -func (ss *SignerServer) serviceLoop() { +func (ss *SignerServer) serviceLoop(ctx context.Context) { for { select { + case <-ss.Quit(): + return + case <-ctx.Done(): + return default: - err := ss.endpoint.ensureConnection() - if err != nil { + if err := ss.endpoint.ensureConnection(); err != nil { return } ss.servicePendingRequest() - - case <-ss.Quit(): - return } } } diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 5e95ec10c..08a285bdf 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -1,7 +1,6 @@ package privval import ( - "io/ioutil" "net" "os" "testing" @@ -29,7 +28,7 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket func testUnixAddr() (string, error) { - f, err := ioutil.TempFile("", "tendermint-privval-test-*") + f, err := os.CreateTemp("", "tendermint-privval-test-*") if err != nil { return "", err } diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto deleted file mode 100644 index 7126488d0..000000000 --- a/proto/tendermint/abci/types.proto +++ /dev/null @@ -1,369 +0,0 @@ -syntax = "proto3"; -package tendermint.abci; - -option go_package = "github.com/tendermint/tendermint/abci/types"; - -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -import "tendermint/crypto/proof.proto"; -import "tendermint/types/types.proto"; -import "tendermint/crypto/keys.proto"; -import "tendermint/types/params.proto"; -import "google/protobuf/timestamp.proto"; -import "gogoproto/gogo.proto"; - -// This file is copied from http://github.com/tendermint/abci -// NOTE: When using custom types, mind the warnings. 
-// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues - -//---------------------------------------- -// Request types - -message Request { - oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8; - RequestEndBlock end_block = 9; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; - } -} - -message RequestEcho { - string message = 1; -} - -message RequestFlush {} - -message RequestInfo { - string version = 1; - uint64 block_version = 2; - uint64 p2p_version = 3; - string abci_version = 4; -} - -message RequestInitChain { - google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - string chain_id = 2; - tendermint.types.ConsensusParams consensus_params = 3; - repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; - bytes app_state_bytes = 5; - int64 initial_height = 6; -} - -message RequestQuery { - bytes data = 1; - string path = 2; - int64 height = 3; - bool prove = 4; -} - -message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - -enum CheckTxType { - NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; - RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; -} - -message RequestCheckTx { - bytes tx = 1; - CheckTxType type = 2; -} - -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - -message RequestCommit {} - -// lists available snapshots -message RequestListSnapshots {} - -// offers a snapshot to the application -message RequestOfferSnapshot { - Snapshot snapshot = 1; // snapshot offered by peers - bytes app_hash = 2; // light client-verified app hash for snapshot height -} - -// loads a snapshot chunk -message RequestLoadSnapshotChunk { - uint64 height = 1; - uint32 format = 2; - uint32 chunk = 3; -} - -// Applies a snapshot chunk -message RequestApplySnapshotChunk { - uint32 index = 1; - bytes chunk = 2; - string sender = 3; -} - -//---------------------------------------- -// Response types - -message Response { - oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9; - ResponseEndBlock end_block = 10; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; - } -} - -// nondeterministic -message ResponseException { - string error = 1; -} - -message ResponseEcho { - string message = 1; -} - -message ResponseFlush {} - -message ResponseInfo { - string data = 1; - - // this is the software version of the application. TODO: remove? 
- string version = 2; - uint64 app_version = 3; - - int64 last_block_height = 4; - bytes last_block_app_hash = 5; -} - -message ResponseInitChain { - tendermint.types.ConsensusParams consensus_params = 1; - repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; - bytes app_hash = 3; -} - -message ResponseQuery { - uint32 code = 1; - // bytes data = 2; // use "value" instead. - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 index = 5; - bytes key = 6; - bytes value = 7; - tendermint.crypto.ProofOps proof_ops = 8; - int64 height = 9; - string codespace = 10; -} - -message ResponseBeginBlock { - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCheckTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; - string sender = 9; - int64 priority = 10; - - // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. - string mempool_error = 11; -} - -message ResponseDeliverTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic - string codespace = 8; -} - -message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; - tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCommit { - // reserve 1 - bytes data = 2; - int64 retain_height = 3; -} - -message ResponseListSnapshots { - repeated Snapshot snapshots = 1; -} - -message ResponseOfferSnapshot { - Result result = 1; - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Snapshot accepted, apply chunks - ABORT = 2; // Abort all snapshot restoration - REJECT = 3; // Reject this specific snapshot, try others - REJECT_FORMAT = 4; // Reject all snapshots of this format, try others - REJECT_SENDER = 5; // Reject all snapshots from the sender(s), try others - } -} - -message ResponseLoadSnapshotChunk { - bytes chunk = 1; -} - -message ResponseApplySnapshotChunk { - Result result = 1; - repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply - repeated string reject_senders = 3; // Chunk senders to reject and ban - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Chunk successfully accepted - ABORT = 2; // Abort all snapshot restoration - RETRY = 3; // Retry chunk (combine with refetch and reject) - RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) - REJECT_SNAPSHOT = 5; // Reject this snapshot, try others - } -} - -//---------------------------------------- -// Misc. 
- -message LastCommitInfo { - int32 round = 1; - repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; -} - -// Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. -// Later, transactions may be queried using these events. -message Event { - string type = 1; - repeated EventAttribute attributes = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes,omitempty"]; -} - -// EventAttribute is a single key-value pair, associated with an event. -message EventAttribute { - string key = 1; - string value = 2; - bool index = 3; // nondeterministic -} - -// TxResult contains results of executing the transaction. -// -// One usage is indexing transaction results. -message TxResult { - int64 height = 1; - uint32 index = 2; - bytes tx = 3; - ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; -} - -//---------------------------------------- -// Blockchain Types - -// Validator -message Validator { - bytes address = 1; // The first 20 bytes of SHA256(public key) - // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - int64 power = 3; // The voting power -} - -// ValidatorUpdate -message ValidatorUpdate { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - int64 power = 2; -} - -// VoteInfo -message VoteInfo { - Validator validator = 1 [(gogoproto.nullable) = false]; - bool signed_last_block = 2; -} - -enum EvidenceType { - UNKNOWN = 0; - DUPLICATE_VOTE = 1; - LIGHT_CLIENT_ATTACK = 2; -} - -message Evidence { - EvidenceType type = 1; - // The offending validator - Validator validator = 2 [(gogoproto.nullable) = false]; - // The height when the offense occurred - int64 height = 3; - // The corresponding time where the offense occurred - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - // Total voting power of the validator set in case the ABCI application does - // not store historical validators. 
- // https://github.com/tendermint/tendermint/issues/4581 - int64 total_voting_power = 5; -} - -//---------------------------------------- -// State Sync Types - -message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // Number of chunks in the snapshot - bytes hash = 4; // Arbitrary snapshot hash, equal only if identical - bytes metadata = 5; // Arbitrary application metadata -} - -//---------------------------------------- -// Service Definition - -service ABCIApplication { - rpc Echo(RequestEcho) returns (ResponseEcho); - rpc Flush(RequestFlush) returns (ResponseFlush); - rpc Info(RequestInfo) returns (ResponseInfo); - rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); - rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); - rpc Query(RequestQuery) returns (ResponseQuery); - rpc Commit(RequestCommit) returns (ResponseCommit); - rpc InitChain(RequestInitChain) returns (ResponseInitChain); - rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); - rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); - rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); - rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); - rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); - rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); -} diff --git a/proto/tendermint/blocksync/message.go b/proto/tendermint/blocksync/message.go index d448ccc4b..1840c4e61 100644 --- a/proto/tendermint/blocksync/message.go +++ b/proto/tendermint/blocksync/message.go @@ -2,9 +2,9 @@ package blocksync import ( "errors" - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) const ( diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go index dd1aebbd0..f81de149f 100644 --- a/proto/tendermint/blocksync/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -5,7 +5,7 @@ import ( math "math" "testing" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index fcbef7107..c00200322 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -68,7 +68,8 @@ func (m *BlockRequest) GetHeight() int64 { return 0 } -// NoBlockResponse informs the node that the peer does not have block at the requested height +// NoBlockResponse informs the node that the peer does not have block at the +// requested height type NoBlockResponse struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto deleted file mode 100644 index 8c187c793..000000000 --- a/proto/tendermint/blocksync/types.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; -package tendermint.blocksync; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; - -import "tendermint/types/block.proto"; - -// BlockRequest requests a block for a specific height -message BlockRequest { - int64 height = 1; -} - -// NoBlockResponse informs the node that the peer does not have block at the requested height -message NoBlockResponse { - 
int64 height = 1; -} - -// BlockResponse returns block to the requested -message BlockResponse { - tendermint.types.Block block = 1; -} - -// StatusRequest requests the status of a peer. -message StatusRequest { -} - -// StatusResponse is a peer response to inform their status. -message StatusResponse { - int64 height = 1; - int64 base = 2; -} - -message Message { - oneof sum { - BlockRequest block_request = 1; - NoBlockResponse no_block_response = 2; - BlockResponse block_response = 3; - StatusRequest status_request = 4; - StatusResponse status_response = 5; - } -} diff --git a/proto/tendermint/consensus/message.go b/proto/tendermint/consensus/message.go index 51feffc22..bcdab629a 100644 --- a/proto/tendermint/consensus/message.go +++ b/proto/tendermint/consensus/message.go @@ -1,9 +1,9 @@ package consensus import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a consensus proto message. diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 6372a88d4..4ae9abc9e 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -103,8 +103,10 @@ func (m *NewRoundStep) GetLastCommitRound() int32 { return 0 } -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// NewValidBlock is sent when a validator observes a valid block B in some round +// r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in +// the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlock struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` @@ -532,7 +534,8 @@ func (m *VoteSetMaj23) GetBlockID() types.BlockID { return types.BlockID{} } -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +// VoteSetBits is sent to communicate the bit-array of votes seen for the +// BlockID. type VoteSetBits struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto deleted file mode 100644 index 6e1f41371..000000000 --- a/proto/tendermint/consensus/types.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; -package tendermint.consensus; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/libs/bits/types.proto"; - -// NewRoundStep is sent for every step taken in the ConsensusState. -// For every height/round/step transition -message NewRoundStep { - int64 height = 1; - int32 round = 2; - uint32 step = 3; - int64 seconds_since_start_time = 4; - int32 last_commit_round = 5; -} - -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. -// In case the block is also committed, then IsCommit flag is set to true. 
-message NewValidBlock { - int64 height = 1; - int32 round = 2; - tendermint.types.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray block_parts = 4; - bool is_commit = 5; -} - -// Proposal is sent when a new block is proposed. -message Proposal { - tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false]; -} - -// ProposalPOL is sent when a previous proposal is re-proposed. -message ProposalPOL { - int64 height = 1; - int32 proposal_pol_round = 2; - tendermint.libs.bits.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; -} - -// BlockPart is sent when gossipping a piece of the proposed block. -message BlockPart { - int64 height = 1; - int32 round = 2; - tendermint.types.Part part = 3 [(gogoproto.nullable) = false]; -} - -// Vote is sent when voting for a proposal (or lack thereof). -message Vote { - tendermint.types.Vote vote = 1; -} - -// HasVote is sent to indicate that a particular vote has been received. -message HasVote { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - int32 index = 4; -} - -// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. -message VoteSetMaj23 { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; -} - -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. -message VoteSetBits { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false]; -} - -message Message { - oneof sum { - NewRoundStep new_round_step = 1; - NewValidBlock new_valid_block = 2; - Proposal proposal = 3; - ProposalPOL proposal_pol = 4; - BlockPart block_part = 5; - Vote vote = 6; - HasVote has_vote = 7; - VoteSetMaj23 vote_set_maj23 = 8; - VoteSetBits vote_set_bits = 9; - } -} diff --git a/proto/tendermint/crypto/keys.proto b/proto/tendermint/crypto/keys.proto deleted file mode 100644 index d66f9fc0c..000000000 --- a/proto/tendermint/crypto/keys.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; -package tendermint.crypto; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; - -import "gogoproto/gogo.proto"; - -// PublicKey defines the keys available for use with Tendermint Validators -message PublicKey { - option (gogoproto.compare) = true; - option (gogoproto.equal) = true; - - oneof sum { - bytes ed25519 = 1; - bytes secp256k1 = 2; - bytes sr25519 = 3; - } -} diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto deleted file mode 100644 index 975df7685..000000000 --- a/proto/tendermint/crypto/proof.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; -package tendermint.crypto; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; - -import "gogoproto/gogo.proto"; - -message Proof { - int64 total = 1; - int64 index = 2; - bytes leaf_hash = 3; - repeated bytes aunts = 4; -} - -message ValueOp { - // Encoded in ProofOp.Key. 
- bytes key = 1; - - // To encode in ProofOp.Data - Proof proof = 2; -} - -message DominoOp { - string key = 1; - string input = 2; - string output = 3; -} - -// ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data -// for example neighbouring node hash -message ProofOp { - string type = 1; - bytes key = 2; - bytes data = 3; -} - -// ProofOps is Merkle proof defined by the list of ProofOps -message ProofOps { - repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/tendermint/libs/bits/types.proto b/proto/tendermint/libs/bits/types.proto deleted file mode 100644 index 3111d113a..000000000 --- a/proto/tendermint/libs/bits/types.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; -package tendermint.libs.bits; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits"; - -message BitArray { - int64 bits = 1; - repeated uint64 elems = 2; -} diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go index 64a79bc81..a3e249f99 100644 --- a/proto/tendermint/mempool/message.go +++ b/proto/tendermint/mempool/message.go @@ -1,9 +1,9 @@ package mempool import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a mempool message. diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto deleted file mode 100644 index b55d9717b..000000000 --- a/proto/tendermint/mempool/types.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; -package tendermint.mempool; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool"; - -message Txs { - repeated bytes txs = 1; -} - -message Message { - oneof sum { - Txs txs = 1; - } -} diff --git a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto deleted file mode 100644 index b12de6c82..000000000 --- a/proto/tendermint/p2p/conn.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "tendermint/crypto/keys.proto"; - -message PacketPing {} - -message PacketPong {} - -message PacketMsg { - int32 channel_id = 1 [(gogoproto.customname) = "ChannelID"]; - bool eof = 2 [(gogoproto.customname) = "EOF"]; - bytes data = 3; -} - -message Packet { - oneof sum { - PacketPing packet_ping = 1; - PacketPong packet_pong = 2; - PacketMsg packet_msg = 3; - } -} - -message AuthSigMessage { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - bytes sig = 2; -} diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 8ba8cd2b2..61036142f 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -1,9 +1,9 @@ package p2p import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a PEX message. 
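The two hunks below strip the `PexRequestV2`/`PexResponseV2` cases from `Wrap`/`Unwrap`, leaving a single request/response pair in the envelope. For orientation, here is a hedged round-trip sketch of the surviving API; `Wrap`, `Unwrap`, and the `URL`-only `PexAddress` are taken from this diff, while the address string itself is a made-up placeholder.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	// Wrap a concrete PEX message into the PexMessage oneof envelope.
	var env p2pproto.PexMessage
	if err := env.Wrap(&p2pproto.PexResponse{
		Addresses: []p2pproto.PexAddress{{URL: "example://node@127.0.0.1:26656"}}, // placeholder URL
	}); err != nil {
		panic(err)
	}

	bz, err := proto.Marshal(&env)
	if err != nil {
		panic(err)
	}

	// On the receiving side, decode the envelope and unwrap the payload.
	var decoded p2pproto.PexMessage
	if err := proto.Unmarshal(bz, &decoded); err != nil {
		panic(err)
	}
	msg, err := decoded.Unwrap()
	if err != nil {
		panic(err)
	}
	fmt.Printf("unwrapped %T\n", msg) // unwrapped *p2p.PexResponse
}
```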
@@ -13,10 +13,6 @@ func (m *PexMessage) Wrap(pb proto.Message) error { m.Sum = &PexMessage_PexRequest{PexRequest: msg} case *PexResponse: m.Sum = &PexMessage_PexResponse{PexResponse: msg} - case *PexRequestV2: - m.Sum = &PexMessage_PexRequestV2{PexRequestV2: msg} - case *PexResponseV2: - m.Sum = &PexMessage_PexResponseV2{PexResponseV2: msg} default: return fmt.Errorf("unknown pex message: %T", msg) } @@ -31,10 +27,6 @@ func (m *PexMessage) Unwrap() (proto.Message, error) { return msg.PexRequest, nil case *PexMessage_PexResponse: return msg.PexResponse, nil - case *PexMessage_PexRequestV2: - return msg.PexRequestV2, nil - case *PexMessage_PexResponseV2: - return msg.PexResponseV2, nil default: return nil, fmt.Errorf("unknown pex message: %T", msg) } diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 63882c364..15ccce15e 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PexAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (m *PexAddress) Reset() { *m = PexAddress{} } @@ -62,27 +60,13 @@ func (m *PexAddress) XXX_DiscardUnknown() { var xxx_messageInfo_PexAddress proto.InternalMessageInfo -func (m *PexAddress) GetID() string { +func (m *PexAddress) GetURL() string { if m != nil { - return m.ID + return m.URL } return "" } -func (m *PexAddress) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -func (m *PexAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type PexRequest struct { } @@ -163,136 +147,10 @@ func (m *PexResponse) GetAddresses() []PexAddress { return nil } -type PexAddressV2 struct { - URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` -} - -func (m *PexAddressV2) Reset() { *m = PexAddressV2{} } -func (m *PexAddressV2) String() string { return proto.CompactTextString(m) } -func (*PexAddressV2) ProtoMessage() {} -func (*PexAddressV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{3} -} -func (m *PexAddressV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexAddressV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexAddressV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexAddressV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddressV2.Merge(m, src) -} -func (m *PexAddressV2) XXX_Size() int { - return m.Size() -} -func (m *PexAddressV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddressV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexAddressV2 proto.InternalMessageInfo - -func (m *PexAddressV2) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -type PexRequestV2 struct { -} - -func (m *PexRequestV2) Reset() { *m = PexRequestV2{} } -func (m *PexRequestV2) String() string { return proto.CompactTextString(m) } -func (*PexRequestV2) ProtoMessage() {} -func (*PexRequestV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{4} -} -func (m *PexRequestV2) XXX_Unmarshal(b []byte) 
error { - return m.Unmarshal(b) -} -func (m *PexRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexRequestV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexRequestV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexRequestV2.Merge(m, src) -} -func (m *PexRequestV2) XXX_Size() int { - return m.Size() -} -func (m *PexRequestV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexRequestV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexRequestV2 proto.InternalMessageInfo - -type PexResponseV2 struct { - Addresses []PexAddressV2 `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` -} - -func (m *PexResponseV2) Reset() { *m = PexResponseV2{} } -func (m *PexResponseV2) String() string { return proto.CompactTextString(m) } -func (*PexResponseV2) ProtoMessage() {} -func (*PexResponseV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{5} -} -func (m *PexResponseV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexResponseV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexResponseV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexResponseV2.Merge(m, src) -} -func (m *PexResponseV2) XXX_Size() int { - return m.Size() -} -func (m *PexResponseV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexResponseV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexResponseV2 proto.InternalMessageInfo - -func (m *PexResponseV2) GetAddresses() []PexAddressV2 { - if m != nil { - return m.Addresses - } - return nil -} - type PexMessage struct { // Types that are valid to be assigned to Sum: // *PexMessage_PexRequest // *PexMessage_PexResponse - // *PexMessage_PexRequestV2 - // *PexMessage_PexResponseV2 Sum isPexMessage_Sum `protobuf_oneof:"sum"` } @@ -300,7 +158,7 @@ func (m *PexMessage) Reset() { *m = PexMessage{} } func (m *PexMessage) String() string { return proto.CompactTextString(m) } func (*PexMessage) ProtoMessage() {} func (*PexMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{6} + return fileDescriptor_81c2f011fd13be57, []int{3} } func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,22 +194,14 @@ type isPexMessage_Sum interface { } type PexMessage_PexRequest struct { - PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` + PexRequest *PexRequest `protobuf:"bytes,3,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } type PexMessage_PexResponse struct { - PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` -} -type PexMessage_PexRequestV2 struct { - PexRequestV2 *PexRequestV2 `protobuf:"bytes,3,opt,name=pex_request_v2,json=pexRequestV2,proto3,oneof" json:"pex_request_v2,omitempty"` -} -type PexMessage_PexResponseV2 struct { - PexResponseV2 *PexResponseV2 `protobuf:"bytes,4,opt,name=pex_response_v2,json=pexResponseV2,proto3,oneof" json:"pex_response_v2,omitempty"` + PexResponse *PexResponse `protobuf:"bytes,4,opt,name=pex_response,json=pexResponse,proto3,oneof" 
json:"pex_response,omitempty"` } -func (*PexMessage_PexRequest) isPexMessage_Sum() {} -func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (*PexMessage_PexRequestV2) isPexMessage_Sum() {} -func (*PexMessage_PexResponseV2) isPexMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { @@ -374,27 +224,11 @@ func (m *PexMessage) GetPexResponse() *PexResponse { return nil } -func (m *PexMessage) GetPexRequestV2() *PexRequestV2 { - if x, ok := m.GetSum().(*PexMessage_PexRequestV2); ok { - return x.PexRequestV2 - } - return nil -} - -func (m *PexMessage) GetPexResponseV2() *PexResponseV2 { - if x, ok := m.GetSum().(*PexMessage_PexResponseV2); ok { - return x.PexResponseV2 - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PexMessage_PexRequest)(nil), (*PexMessage_PexResponse)(nil), - (*PexMessage_PexRequestV2)(nil), - (*PexMessage_PexResponseV2)(nil), } } @@ -402,42 +236,33 @@ func init() { proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") - proto.RegisterType((*PexAddressV2)(nil), "tendermint.p2p.PexAddressV2") - proto.RegisterType((*PexRequestV2)(nil), "tendermint.p2p.PexRequestV2") - proto.RegisterType((*PexResponseV2)(nil), "tendermint.p2p.PexResponseV2") proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x8a, 0xda, 0x40, - 0x14, 0xc7, 0xf3, 0x61, 0x2d, 0x9e, 0x44, 0x0b, 0x43, 0x29, 0xa9, 0x6d, 0xa3, 0xe4, 0xca, 0xde, - 0x24, 0x30, 0xa5, 0x97, 0x2d, 0x36, 0x08, 0xb5, 0x50, 0xa9, 0x1d, 0xd8, 0x5c, 0xec, 0x8d, 0xe8, - 0x66, 0xc8, 0x06, 0x56, 0x33, 0x9b, 0x49, 0x16, 0x1f, 0x63, 0xdf, 0x61, 0x5f, 0xc6, 0x4b, 0x2f, - 0xf7, 0x4a, 0x96, 0xf8, 0x22, 0x8b, 0x13, 0x31, 0x23, 0xba, 0x7b, 0x37, 0xe7, 0x7f, 0xbe, 0x7e, - 0xe7, 0xcc, 0x01, 0x2b, 0xa3, 0x8b, 0x90, 0xa6, 0xf3, 0x78, 0x91, 0x79, 0x0c, 0x33, 0x8f, 0xd1, - 0xa5, 0xcb, 0xd2, 0x24, 0x4b, 0x50, 0xab, 0xf2, 0xb8, 0x0c, 0xb3, 0xf6, 0xfb, 0x28, 0x89, 0x12, - 0xe1, 0xf2, 0x76, 0xaf, 0x32, 0xca, 0x19, 0x03, 0x8c, 0xe9, 0xf2, 0x57, 0x18, 0xa6, 0x94, 0x73, - 0xf4, 0x01, 0xb4, 0x38, 0xb4, 0xd4, 0xae, 0xda, 0x6b, 0xf8, 0xf5, 0x62, 0xd3, 0xd1, 0xfe, 0x0c, - 0x88, 0x16, 0x87, 0x42, 0x67, 0x96, 0x26, 0xe9, 0x63, 0xa2, 0xc5, 0x0c, 0x21, 0xa8, 0xb1, 0x24, - 0xcd, 0x2c, 0xbd, 0xab, 0xf6, 0x9a, 0x44, 0xbc, 0x1d, 0x53, 0x54, 0x24, 0xf4, 0x36, 0xa7, 0x3c, - 0x73, 0x46, 0x60, 0x08, 0x8b, 0xb3, 0x64, 0xc1, 0x29, 0xfa, 0x09, 0x8d, 0x69, 0xd9, 0x8b, 0x72, - 0x4b, 0xed, 0xea, 0x3d, 0x03, 0xb7, 0xdd, 0x63, 0x50, 0xb7, 0xe2, 0xf1, 0x6b, 0xab, 0x4d, 0x47, - 0x21, 0x55, 0x8a, 0xf3, 0x15, 0xcc, 0xca, 0x1d, 0x60, 0xf4, 0x11, 0xf4, 0x3c, 0xbd, 0xd9, 0x13, - 0xbf, 0x2d, 0x36, 0x1d, 0xfd, 0x82, 0xfc, 0x25, 0x3b, 0xcd, 0x69, 0x89, 0xd0, 0x3d, 0x47, 0x80, - 0x9d, 0xff, 0xd0, 0x94, 0x48, 0x02, 0x8c, 0xfa, 0xa7, 0x2c, 0x9f, 0x5f, 0x66, 0x09, 0xf0, 0x29, - 0xcd, 0x83, 0x26, 0x66, 0x1d, 0x51, 0xce, 0xa7, 0x11, 0x45, 0x3f, 0xc0, 0x60, 0x74, 0x39, 0x49, - 0xcb, 0x96, 0x02, 0xea, 0xfc, 0x78, 0x7b, 
0xa8, 0xa1, 0x42, 0x80, 0x1d, 0x2c, 0xd4, 0x07, 0xb3, - 0x4c, 0x2f, 0x09, 0xc5, 0xba, 0x0d, 0xfc, 0xe9, 0x6c, 0x7e, 0x19, 0x32, 0x54, 0x88, 0xc1, 0xa4, - 0xed, 0x0e, 0xa0, 0x25, 0x01, 0x4c, 0xee, 0xb0, 0xf8, 0x98, 0xf3, 0x63, 0x1d, 0x16, 0x33, 0x54, - 0x88, 0xc9, 0x24, 0x1b, 0xfd, 0x86, 0x77, 0x32, 0xc7, 0xae, 0x4c, 0x4d, 0x94, 0xf9, 0xf2, 0x0a, - 0x8a, 0xa8, 0xd3, 0x64, 0xb2, 0xe0, 0xbf, 0x01, 0x9d, 0xe7, 0x73, 0xff, 0xdf, 0xaa, 0xb0, 0xd5, - 0x75, 0x61, 0xab, 0x4f, 0x85, 0xad, 0xde, 0x6f, 0x6d, 0x65, 0xbd, 0xb5, 0x95, 0xc7, 0xad, 0xad, - 0x5c, 0x7e, 0x8f, 0xe2, 0xec, 0x3a, 0x9f, 0xb9, 0x57, 0xc9, 0xdc, 0x93, 0xee, 0x58, 0x3e, 0x69, - 0x71, 0xaf, 0xc7, 0x37, 0x3e, 0xab, 0x0b, 0xf5, 0xdb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x9b, 0xfd, 0x75, 0xfc, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x89, 0xa4, 0xe7, + 0xa7, 0xe7, 0x83, 0xa5, 0xf4, 0x41, 0x2c, 0x88, 0x2a, 0x25, 0x63, 0x2e, 0xae, 0x80, 0xd4, 0x0a, + 0xc7, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x21, 0x49, 0x2e, 0xe6, 0xd2, 0xa2, 0x1c, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xf6, 0x47, 0xf7, 0xe4, 0x99, 0x43, 0x83, 0x7c, 0x82, 0x40, 0x62, + 0x5e, 0x2c, 0x1c, 0x4c, 0x02, 0xcc, 0x5e, 0x2c, 0x1c, 0xcc, 0x02, 0x2c, 0x4a, 0x3c, 0x60, 0x4d, + 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xbe, 0x5c, 0xdc, 0x60, 0x5e, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x90, 0x1d, 0x17, 0x67, 0x22, 0xc4, 0xb8, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, + 0x0d, 0x6e, 0x23, 0x29, 0x3d, 0x54, 0xb7, 0xe8, 0x21, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xa1, 0x45, 0x69, 0x01, 0x23, 0xd8, 0x74, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, + 0x21, 0x5b, 0x2e, 0xee, 0x82, 0xd4, 0x8a, 0xf8, 0x22, 0x88, 0x65, 0x12, 0xcc, 0x0a, 0x8c, 0x38, + 0x0c, 0x84, 0x3a, 0xc7, 0x83, 0x21, 0x88, 0xab, 0x00, 0xce, 0x13, 0x72, 0xe0, 0xe2, 0x81, 0x68, + 0x87, 0xb8, 0x4e, 0x82, 0x05, 0xac, 0x5f, 0x1a, 0xab, 0x7e, 0x88, 0x12, 0x0f, 0x86, 0x20, 0xee, + 0x02, 0x04, 0xd7, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0x8b, 0x85, 0x83, 0x51, 0x80, 0x09, + 0x12, 0x0a, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, + 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, + 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x33, 0xc8, 0x91, 0x04, + 0x8e, 0x01, 0xd4, 0x58, 0x4b, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa7, + 0x1d, 0xdd, 0x6f, 0xce, 0x01, 0x00, 0x00, } func (m *PexAddress) Marshal() (dAtA []byte, err error) { @@ -460,22 +285,10 @@ func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Port != 0 { - i = encodeVarintPex(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + if len(m.URL) > 0 { + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa } @@ -542,96 +355,6 @@ func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddressV2) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexAddressV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexAddressV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.URL) > 0 { - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PexRequestV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PexResponseV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -681,7 +404,7 @@ func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } @@ -702,48 +425,6 @@ func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexRequestV2 != nil { - { - size, err := m.PexRequestV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexResponseV2 != nil { - { - size, err := m.PexResponseV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil @@ -765,17 +446,10 @@ func (m *PexAddress) Size() (n int) { } var l int _ = l - l = len(m.ID) + l = len(m.URL) if l > 0 { n += 1 + l + 
sovPex(uint64(l)) } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovPex(uint64(m.Port)) - } return n } @@ -803,43 +477,6 @@ func (m *PexResponse) Size() (n int) { return n } -func (m *PexAddressV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - return n -} - -func (m *PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovPex(uint64(l)) - } - } - return n -} - func (m *PexMessage) Size() (n int) { if m == nil { return 0 @@ -876,30 +513,6 @@ func (m *PexMessage_PexResponse) Size() (n int) { } return n } -func (m *PexMessage_PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequestV2 != nil { - l = m.PexRequestV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponseV2 != nil { - l = m.PexResponseV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} func sovPex(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -938,7 +551,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -966,59 +579,8 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) @@ -1174,222 +736,6 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *PexAddressV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: PexAddressV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddressV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexRequestV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexRequestV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexResponseV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexResponseV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, PexAddressV2{}) - if err := 
m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1419,7 +765,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) } @@ -1454,7 +800,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } @@ -1489,76 +835,6 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexRequestV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexRequestV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexRequestV2{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexResponseV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexResponseV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexResponseV2{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto deleted file mode 100644 index 1f78c9864..000000000 --- a/proto/tendermint/p2p/pex.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; - -message PexAddress { - string id = 1 [(gogoproto.customname) = "ID"]; - string ip = 2 [(gogoproto.customname) = "IP"]; - uint32 port = 3; -} - -message PexRequest {} - -message PexResponse { - repeated PexAddress addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexAddressV2 { - string url = 1 [(gogoproto.customname) = "URL"]; -} - -message PexRequestV2 {} - -message PexResponseV2 { - repeated PexAddressV2 addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexMessage 
{ - oneof sum { - PexRequest pex_request = 1; - PexResponse pex_response = 2; - PexRequestV2 pex_request_v2 = 3; - PexResponseV2 pex_response_v2 = 4; - } -} diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto deleted file mode 100644 index 216a6d8d0..000000000 --- a/proto/tendermint/p2p/types.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; - -message ProtocolVersion { - uint64 p2p = 1 [(gogoproto.customname) = "P2P"]; - uint64 block = 2; - uint64 app = 3; -} - -message NodeInfo { - ProtocolVersion protocol_version = 1 [(gogoproto.nullable) = false]; - string node_id = 2 [(gogoproto.customname) = "NodeID"]; - string listen_addr = 3; - string network = 4; - string version = 5; - bytes channels = 6; - string moniker = 7; - NodeInfoOther other = 8 [(gogoproto.nullable) = false]; -} - -message NodeInfoOther { - string tx_index = 1; - string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"]; -} - -message PeerInfo { - string id = 1 [(gogoproto.customname) = "ID"]; - repeated PeerAddressInfo address_info = 2; - google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; -} - -message PeerAddressInfo { - string address = 1; - google.protobuf.Timestamp last_dial_success = 2 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_dial_failure = 3 [(gogoproto.stdtime) = true]; - uint32 dial_failures = 4; -} diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index 63e9afca7..2c699e1cd 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto deleted file mode 100644 index ee948a406..000000000 --- a/proto/tendermint/rpc/grpc/types.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; -package tendermint.rpc.grpc; -option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc"; - -import "tendermint/abci/types.proto"; - -//---------------------------------------- -// Request types - -message RequestPing {} - -message RequestBroadcastTx { - bytes tx = 1; -} - -//---------------------------------------- -// Response types - -message ResponsePing {} - -message ResponseBroadcastTx { - tendermint.abci.ResponseCheckTx check_tx = 1; - tendermint.abci.ResponseDeliverTx deliver_tx = 2; -} - -//---------------------------------------- -// Service Definition - -service BroadcastAPI { - rpc Ping(RequestPing) returns (ResponsePing); - rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); -} diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go index 992cd7525..92d3764fd 100644 --- a/proto/tendermint/statesync/message.go +++ b/proto/tendermint/statesync/message.go @@ -2,9 +2,9 @@ package statesync import ( "errors" - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a state sync proto message. 
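One detail worth calling out from the deleted `pex.proto` and the regenerated `pex.pb.go` above: `pex_request` and `pex_response` were renumbered from fields 1 and 2 to fields 3 and 4, the tags previously held by the V2 variants (the marshal hunks switch the tag bytes from 0xa/0x12 to 0x1a/0x22 accordingly). On the wire, the new `PexRequest` should therefore be indistinguishable from the removed `PexRequestV2`. A small check of that assumption, using only the generated API shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	var env p2pproto.PexMessage
	if err := env.Wrap(&p2pproto.PexRequest{}); err != nil {
		panic(err)
	}
	bz, err := proto.Marshal(&env)
	if err != nil {
		panic(err)
	}
	// Field 3, wire type 2 (length-delimited) encodes as tag byte 0x1a,
	// the same tag the removed pex_request_v2 field occupied.
	fmt.Printf("%x\n", bz) // expected: 1a00
}
```

Keeping the V2 tags presumably lets a node built from this branch keep exchanging PEX messages with peers that speak the V2 format, while the old V1 tags (1 and 2) are retired.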
diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go index 40428ec07..cccd25766 100644 --- a/proto/tendermint/statesync/message_test.go +++ b/proto/tendermint/statesync/message_test.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "testing" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto deleted file mode 100644 index fcfd05f68..000000000 --- a/proto/tendermint/statesync/types.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; -package tendermint.statesync; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/params.proto"; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; - -message Message { - oneof sum { - SnapshotsRequest snapshots_request = 1; - SnapshotsResponse snapshots_response = 2; - ChunkRequest chunk_request = 3; - ChunkResponse chunk_response = 4; - LightBlockRequest light_block_request = 5; - LightBlockResponse light_block_response = 6; - ParamsRequest params_request = 7; - ParamsResponse params_response = 8; - } -} - -message SnapshotsRequest {} - -message SnapshotsResponse { - uint64 height = 1; - uint32 format = 2; - uint32 chunks = 3; - bytes hash = 4; - bytes metadata = 5; -} - -message ChunkRequest { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; -} - -message ChunkResponse { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; - bytes chunk = 4; - bool missing = 5; -} - -message LightBlockRequest { - uint64 height = 1; -} - -message LightBlockResponse { - tendermint.types.LightBlock light_block = 1; -} - -message ParamsRequest { - uint64 height = 1; -} - -message ParamsResponse { - uint64 height = 1; - tendermint.types.ConsensusParams consensus_params = 2 [(gogoproto.nullable) = false]; -} \ No newline at end of file diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto deleted file mode 100644 index 84e9bb15d..000000000 --- a/proto/tendermint/types/block.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/evidence.proto"; - -message Block { - Header header = 1 [(gogoproto.nullable) = false]; - Data data = 2 [(gogoproto.nullable) = false]; - tendermint.types.EvidenceList evidence = 3 [(gogoproto.nullable) = false]; - Commit last_commit = 4; -} diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index daab3dc34..052fb0e6b 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -112,7 +112,8 @@ func (*Evidence) XXX_OneofWrappers() []interface{} { } } -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting +// votes. 
type DuplicateVoteEvidence struct { VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` @@ -189,7 +190,8 @@ func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { return time.Time{} } -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +// LightClientAttackEvidence contains evidence of a set of validators attempting +// to mislead a light client. type LightClientAttackEvidence struct { ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto deleted file mode 100644 index 451b8dca3..000000000 --- a/proto/tendermint/types/evidence.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/validator.proto"; - -message Evidence { - oneof sum { - DuplicateVoteEvidence duplicate_vote_evidence = 1; - LightClientAttackEvidence light_client_attack_evidence = 2; - } -} - -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. -message DuplicateVoteEvidence { - tendermint.types.Vote vote_a = 1; - tendermint.types.Vote vote_b = 2; - int64 total_voting_power = 3; - int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. -message LightClientAttackEvidence { - tendermint.types.LightBlock conflicting_block = 1; - int64 common_height = 2; - repeated tendermint.types.Validator byzantine_validators = 3; - int64 total_voting_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -message EvidenceList { - repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 5a9f103a9..3bdf4cb6f 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -167,8 +167,8 @@ type EvidenceParams struct { // mechanism for handling [Nothing-At-Stake // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. + // This sets the maximum size of total evidence in bytes that can be committed + // in a single block. and should fall comfortably under the max block bytes. 
// Default is 1048576 or 1MB MaxBytes int64 `protobuf:"varint,3,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` } diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto deleted file mode 100644 index cc926b64e..000000000 --- a/proto/tendermint/types/params.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; - -option (gogoproto.equal_all) = true; - -// ConsensusParams contains consensus critical parameters that determine the -// validity of blocks. -message ConsensusParams { - BlockParams block = 1; - EvidenceParams evidence = 2; - ValidatorParams validator = 3; - VersionParams version = 4; -} - -// BlockParams contains limits on the block size. -message BlockParams { - // Max block size, in bytes. - // Note: must be greater than 0 - int64 max_bytes = 1; - // Max gas per block. - // Note: must be greater or equal to -1 - int64 max_gas = 2; -} - -// EvidenceParams determine how we handle evidence of malfeasance. -message EvidenceParams { - // Max age of evidence, in blocks. - // - // The basic formula for calculating this is: MaxAgeDuration / {average block - // time}. - int64 max_age_num_blocks = 1; - - // Max age of evidence, in time. - // - // It should correspond with an app's "unbonding period" or other similar - // mechanism for handling [Nothing-At-Stake - // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). - google.protobuf.Duration max_age_duration = 2 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; - - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. - // Default is 1048576 or 1MB - int64 max_bytes = 3; -} - -// ValidatorParams restrict the public key types validators can use. -// NOTE: uses ABCI pubkey naming, not Amino names. -message ValidatorParams { - repeated string pub_key_types = 1; -} - -// VersionParams contains the ABCI application version. -message VersionParams { - uint64 app_version = 1; -} - -// HashedParams is a subset of ConsensusParams. -// -// It is hashed into the Header.ConsensusHash. -message HashedParams { - int64 block_max_bytes = 1; - int64 block_max_gas = 2; -} diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 73090558e..653497c56 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -565,7 +565,8 @@ func (m *Vote) GetSignature() []byte { return nil } -// Commit contains the evidence that a block was committed by a set of validators. +// Commit contains the evidence that a block was committed by a set of +// validators. type Commit struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -967,7 +968,8 @@ func (m *BlockMeta) GetNumTxs() int64 { return 0 } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +// TxProof represents a Merkle proof of the presence of a transaction in the +// Merkle tree. 
type TxProof struct { RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto deleted file mode 100644 index 8d4f00972..000000000 --- a/proto/tendermint/types/types.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/crypto/proof.proto"; -import "tendermint/version/types.proto"; -import "tendermint/types/validator.proto"; - -// BlockIdFlag indicates which BlcokID the signature is for -enum BlockIDFlag { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; -} - -// SignedMsgType is a type of signed message in the consensus. -enum SignedMsgType { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; - // Votes - SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; - SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; - - // Proposals - SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; -} - -// PartsetHeader -message PartSetHeader { - uint32 total = 1; - bytes hash = 2; -} - -message Part { - uint32 index = 1; - bytes bytes = 2; - tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; -} - -// BlockID -message BlockID { - bytes hash = 1; - PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; -} - -// -------------------------------- - -// Header defines the structure of a Tendermint block header. -message Header { - // basic block info - tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; - string chain_id = 2 [(gogoproto.customname) = "ChainID"]; - int64 height = 3; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - - // prev block info - BlockID last_block_id = 5 [(gogoproto.nullable) = false]; - - // hashes of block data - bytes last_commit_hash = 6; // commit from validators from the last block - bytes data_hash = 7; // transactions - - // hashes from the app output from the prev block - bytes validators_hash = 8; // validators for the current block - bytes next_validators_hash = 9; // validators for the next block - bytes consensus_hash = 10; // consensus params for current block - bytes app_hash = 11; // state after txs from the previous block - bytes last_results_hash = 12; // root hash of all results from the txs from the previous block - - // consensus info - bytes evidence_hash = 13; // evidence included in the block - bytes proposer_address = 14; // original proposer of the block -} - -// Data contains the set of transactions included in the block -message Data { - // Txs that will be applied by state @ block.Height+1. - // NOTE: not all txs here are valid. 
We're just agreeing on the order first. - // This means that block.AppHash does not include these txs. - repeated bytes txs = 1; -} - -// Vote represents a prevote, precommit, or commit vote from validators for -// consensus. -message Vote { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - BlockID block_id = 4 - [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. - google.protobuf.Timestamp timestamp = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes validator_address = 6; - int32 validator_index = 7; - bytes signature = 8; -} - -// Commit contains the evidence that a block was committed by a set of validators. -message Commit { - int64 height = 1; - int32 round = 2; - BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; - repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; -} - -// CommitSig is a part of the Vote included in a Commit. -message CommitSig { - BlockIDFlag block_id_flag = 1; - bytes validator_address = 2; - google.protobuf.Timestamp timestamp = 3 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 4; -} - -message Proposal { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - int32 pol_round = 4; - BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - google.protobuf.Timestamp timestamp = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 7; -} - -message SignedHeader { - Header header = 1; - Commit commit = 2; -} - -message LightBlock { - SignedHeader signed_header = 1; - tendermint.types.ValidatorSet validator_set = 2; -} - -message BlockMeta { - BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - int64 block_size = 2; - Header header = 3 [(gogoproto.nullable) = false]; - int64 num_txs = 4; -} - -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. -message TxProof { - bytes root_hash = 1; - bytes data = 2; - tendermint.crypto.Proof proof = 3; -} diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto deleted file mode 100644 index 49860b96d..000000000 --- a/proto/tendermint/types/validator.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "tendermint/crypto/keys.proto"; - -message ValidatorSet { - repeated Validator validators = 1; - Validator proposer = 2; - int64 total_voting_power = 3; -} - -message Validator { - bytes address = 1; - tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false]; - int64 voting_power = 3; - int64 proposer_priority = 4; -} - -message SimpleValidator { - tendermint.crypto.PublicKey pub_key = 1; - int64 voting_power = 2; -} diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 6e224392e..76a94fd3c 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -23,9 +23,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. 
+// Consensus captures the consensus rules for processing a block in the +// blockchain, including all blockchain data structures and the rules of the +// application's state transition machine. type Consensus struct { Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"` App uint64 `protobuf:"varint,2,opt,name=app,proto3" json:"app,omitempty"` diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto deleted file mode 100644 index 3c4e4cc53..000000000 --- a/proto/tendermint/version/types.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package tendermint.version; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; - -import "gogoproto/gogo.proto"; - -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. -message Consensus { - option (gogoproto.equal) = true; - - uint64 block = 1; - uint64 app = 2; -} diff --git a/proxy/client.go b/proxy/client.go deleted file mode 100644 index 929933e01..000000000 --- a/proxy/client.go +++ /dev/null @@ -1,94 +0,0 @@ -package proxy - -import ( - "fmt" - "io" - - abcicli "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -//go:generate ../scripts/mockery_generate.sh ClientCreator - -// ClientCreator creates new ABCI clients. -type ClientCreator interface { - // NewABCIClient returns a new ABCI client. - NewABCIClient() (abcicli.Client, error) -} - -//---------------------------------------------------- -// local proxy uses a mutex on an in-proc app - -type localClientCreator struct { - mtx *tmsync.RWMutex - app types.Application -} - -// NewLocalClientCreator returns a ClientCreator for the given app, -// which will be running locally. -func NewLocalClientCreator(app types.Application) ClientCreator { - return &localClientCreator{ - mtx: new(tmsync.RWMutex), - app: app, - } -} - -func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { - return abcicli.NewLocalClient(l.mtx, l.app), nil -} - -//--------------------------------------------------------------- -// remote proxy opens new connections to an external app process - -type remoteClientCreator struct { - addr string - transport string - mustConnect bool -} - -// NewRemoteClientCreator returns a ClientCreator for the given address (e.g. -// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you -// want the client to connect before reporting success. -func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { - return &remoteClientCreator{ - addr: addr, - transport: transport, - mustConnect: mustConnect, - } -} - -func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { - remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) - if err != nil { - return nil, fmt.Errorf("failed to connect to proxy: %w", err) - } - - return remoteApp, nil -} - -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore' or 'noop', otherwise - a remote client. -// -// The Closer is a noop except for persistent_kvstore applications, -// which will clean up the store. 
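For reference, a minimal sketch of how the ClientCreator API above is typically wired by a caller, assuming the in-process kvstore application from this repository; the snippet is illustrative only and not part of this diff:

	package main

	import (
		"log"

		"github.com/tendermint/tendermint/abci/example/kvstore"
		"github.com/tendermint/tendermint/proxy"
	)

	func main() {
		// In-process app guarded by a mutex, per NewLocalClientCreator above.
		creator := proxy.NewLocalClientCreator(kvstore.NewApplication())

		client, err := creator.NewABCIClient()
		if err != nil {
			log.Fatal(err)
		}
		if err := client.Start(); err != nil { // abci clients are services
			log.Fatal(err)
		}
		defer func() { _ = client.Stop() }()
	}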
-func DefaultClientCreator(addr, transport, dbDir string) (ClientCreator, io.Closer) { - switch addr { - case "kvstore": - return NewLocalClientCreator(kvstore.NewApplication()), noopCloser{} - case "persistent_kvstore": - app := kvstore.NewPersistentKVStoreApplication(dbDir) - return NewLocalClientCreator(app), app - case "noop": - return NewLocalClientCreator(types.NewBaseApplication()), noopCloser{} - default: - mustConnect := false // loop retrying - return NewRemoteClientCreator(addr, transport, mustConnect), noopCloser{} - } -} - -type noopCloser struct{} - -func (noopCloser) Close() error { return nil } diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go deleted file mode 100644 index 0e4157c2f..000000000 --- a/proxy/mocks/client_creator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - abcicli "github.com/tendermint/tendermint/abci/client" -) - -// ClientCreator is an autogenerated mock type for the ClientCreator type -type ClientCreator struct { - mock.Mock -} - -// NewABCIClient provides a mock function with given fields: -func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { - ret := _m.Called() - - var r0 abcicli.Client - if rf, ok := ret.Get(0).(func() abcicli.Client); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(abcicli.Client) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go deleted file mode 100644 index 369b685ba..000000000 --- a/proxy/multi_app_conn.go +++ /dev/null @@ -1,200 +0,0 @@ -package proxy - -import ( - "fmt" - "os" - "syscall" - - abcicli "github.com/tendermint/tendermint/abci/client" - tmlog "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" -) - -const ( - connConsensus = "consensus" - connMempool = "mempool" - connQuery = "query" - connSnapshot = "snapshot" -) - -// AppConns is the Tendermint's interface to the application that consists of -// multiple connections. -type AppConns interface { - service.Service - - // Mempool connection - Mempool() AppConnMempool - // Consensus connection - Consensus() AppConnConsensus - // Query connection - Query() AppConnQuery - // Snapshot connection - Snapshot() AppConnSnapshot -} - -// NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator ClientCreator) AppConns { - return NewMultiAppConn(clientCreator) -} - -// multiAppConn implements AppConns. -// -// A multiAppConn is made of a few appConns and manages their underlying abci -// clients. -// TODO: on app restart, clients must reboot together -type multiAppConn struct { - service.BaseService - - consensusConn AppConnConsensus - mempoolConn AppConnMempool - queryConn AppConnQuery - snapshotConn AppConnSnapshot - - consensusConnClient abcicli.Client - mempoolConnClient abcicli.Client - queryConnClient abcicli.Client - snapshotConnClient abcicli.Client - - clientCreator ClientCreator -} - -// NewMultiAppConn makes all necessary abci connections to the application. 
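Similarly, a short sketch of the multi-connection setup this file provides, mirroring the removed test further below; again the kvstore app stands in for a real ABCI application:

	package main

	import (
		"log"

		"github.com/tendermint/tendermint/abci/example/kvstore"
		"github.com/tendermint/tendermint/proxy"
	)

	func main() {
		conns := proxy.NewAppConns(proxy.NewLocalClientCreator(kvstore.NewApplication()))
		if err := conns.Start(); err != nil { // opens the four connections above
			log.Fatal(err)
		}
		defer func() { _ = conns.Stop() }()

		// Hand each per-purpose connection to the component that needs it.
		_ = conns.Mempool()
		_ = conns.Consensus()
		_ = conns.Query()
		_ = conns.Snapshot()
	}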
-func NewMultiAppConn(clientCreator ClientCreator) AppConns { - multiAppConn := &multiAppConn{ - clientCreator: clientCreator, - } - multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) - return multiAppConn -} - -func (app *multiAppConn) Mempool() AppConnMempool { - return app.mempoolConn -} - -func (app *multiAppConn) Consensus() AppConnConsensus { - return app.consensusConn -} - -func (app *multiAppConn) Query() AppConnQuery { - return app.queryConn -} - -func (app *multiAppConn) Snapshot() AppConnSnapshot { - return app.snapshotConn -} - -func (app *multiAppConn) OnStart() error { - c, err := app.abciClientFor(connQuery) - if err != nil { - return err - } - app.queryConnClient = c - app.queryConn = NewAppConnQuery(c) - - c, err = app.abciClientFor(connSnapshot) - if err != nil { - app.stopAllClients() - return err - } - app.snapshotConnClient = c - app.snapshotConn = NewAppConnSnapshot(c) - - c, err = app.abciClientFor(connMempool) - if err != nil { - app.stopAllClients() - return err - } - app.mempoolConnClient = c - app.mempoolConn = NewAppConnMempool(c) - - c, err = app.abciClientFor(connConsensus) - if err != nil { - app.stopAllClients() - return err - } - app.consensusConnClient = c - app.consensusConn = NewAppConnConsensus(c) - - // Kill Tendermint if the ABCI application crashes. - go app.killTMOnClientError() - - return nil -} - -func (app *multiAppConn) OnStop() { - app.stopAllClients() -} - -func (app *multiAppConn) killTMOnClientError() { - killFn := func(conn string, err error, logger tmlog.Logger) { - logger.Error( - fmt.Sprintf("%s connection terminated. Did the application crash? Please restart tendermint", conn), - "err", err) - if killErr := kill(); killErr != nil { - logger.Error("Failed to kill this process - please do so manually", "err", killErr) - } - } - - select { - case <-app.consensusConnClient.Quit(): - if err := app.consensusConnClient.Error(); err != nil { - killFn(connConsensus, err, app.Logger) - } - case <-app.mempoolConnClient.Quit(): - if err := app.mempoolConnClient.Error(); err != nil { - killFn(connMempool, err, app.Logger) - } - case <-app.queryConnClient.Quit(): - if err := app.queryConnClient.Error(); err != nil { - killFn(connQuery, err, app.Logger) - } - case <-app.snapshotConnClient.Quit(): - if err := app.snapshotConnClient.Error(); err != nil { - killFn(connSnapshot, err, app.Logger) - } - } -} - -func (app *multiAppConn) stopAllClients() { - if app.consensusConnClient != nil { - if err := app.consensusConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping consensus client", "error", err) - } - } - if app.mempoolConnClient != nil { - if err := app.mempoolConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping mempool client", "error", err) - } - } - if app.queryConnClient != nil { - if err := app.queryConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping query client", "error", err) - } - } - if app.snapshotConnClient != nil { - if err := app.snapshotConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping snapshot client", "error", err) - } - } -} - -func (app *multiAppConn) abciClientFor(conn string) (abcicli.Client, error) { - c, err := app.clientCreator.NewABCIClient() - if err != nil { - return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) - } - c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) - if err := c.Start(); err != nil { - return nil, fmt.Errorf("error starting ABCI client 
(%s connection): %w", conn, err) - } - return c, nil -} - -func kill() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - - return p.Signal(syscall.SIGTERM) -} diff --git a/proxy/multi_app_conn_test.go b/proxy/multi_app_conn_test.go deleted file mode 100644 index 34b0d0830..000000000 --- a/proxy/multi_app_conn_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package proxy - -import ( - "errors" - "os" - "os/signal" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - abcimocks "github.com/tendermint/tendermint/abci/client/mocks" - "github.com/tendermint/tendermint/proxy/mocks" -) - -func TestAppConns_Start_Stop(t *testing.T) { - quitCh := make(<-chan struct{}) - - clientCreatorMock := &mocks.ClientCreator{} - - clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return().Times(4) - clientMock.On("Start").Return(nil).Times(4) - clientMock.On("Stop").Return(nil).Times(4) - clientMock.On("Quit").Return(quitCh).Times(4) - - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil).Times(4) - - appConns := NewAppConns(clientCreatorMock) - - err := appConns.Start() - require.NoError(t, err) - - time.Sleep(100 * time.Millisecond) - - err = appConns.Stop() - require.NoError(t, err) - - clientMock.AssertExpectations(t) -} - -// Upon failure, we call tmos.Kill -func TestAppConns_Failure(t *testing.T) { - ok := make(chan struct{}) - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM) - go func() { - for range c { - close(ok) - } - }() - - quitCh := make(chan struct{}) - var recvQuitCh <-chan struct{} // nolint:gosimple - recvQuitCh = quitCh - - clientCreatorMock := &mocks.ClientCreator{} - - clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return() - clientMock.On("Start").Return(nil) - clientMock.On("Stop").Return(nil) - - clientMock.On("Quit").Return(recvQuitCh) - clientMock.On("Error").Return(errors.New("EOF")).Once() - - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil) - - appConns := NewAppConns(clientCreatorMock) - - err := appConns.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := appConns.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate failure - close(quitCh) - - select { - case <-ok: - t.Log("SIGTERM successfully received") - case <-time.After(5 * time.Second): - t.Fatal("expected process to receive SIGTERM signal") - } -} diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 4acd0fee9..3b91de107 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "reflect" "testing" "time" @@ -13,11 +12,11 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) -var waitForEventTimeout = 8 * time.Second +const waitForEventTimeout = 2 * time.Second // MakeTxKV returns a text transaction, allong with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { @@ -26,164 +25,41 @@ func MakeTxKV() ([]byte, []byte, []byte) { return k, v, append(k, append([]byte("="), v...)...) 
} -func TestHeaderEvents(t *testing.T) { - n, conf := NodeSuite(t) +func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) { + // make the tx + _, _, tx := MakeTxKV() - for i, c := range GetClients(t, n, conf) { - i, c := i, c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } + // send + done := make(chan struct{}) + go func() { + defer close(done) + var ( + txres *coretypes.ResultBroadcastTx + err error + ) + switch broadcastMethod { + case "async": + txres, err = c.BroadcastTxAsync(ctx, tx) + case "sync": + txres, err = c.BroadcastTxSync(ctx, tx) + default: + panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + } + if assert.NoError(t, err) { + assert.Equal(t, txres.Code, abci.CodeTypeOK) + } + }() - evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - _, ok := evt.(types.EventDataNewBlockHeader) - require.True(t, ok, "%d: %#v", i, evt) - // TODO: more checks... - }) - } -} - -// subscribe to new blocks and make sure height increments by 1 -func TestBlockEvents(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - const subscriber = "TestBlockEvents" - - eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) - require.NoError(t, err) - t.Cleanup(func() { - if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil { - t.Error(err) - } - }) - - var firstBlockHeight int64 - for i := int64(0); i < 3; i++ { - event := <-eventCh - blockEvent, ok := event.Data.(types.EventDataNewBlock) - require.True(t, ok) - - block := blockEvent.Block - - if firstBlockHeight == 0 { - firstBlockHeight = block.Header.Height - } - - require.Equal(t, firstBlockHeight+i, block.Header.Height) - } - }) - } -} - -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } - -func testTxEventsSent(t *testing.T, broadcastMethod string) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - // make the tx - _, _, tx := MakeTxKV() - - // send - go func() { - var ( - txres *ctypes.ResultBroadcastTx - err error - ctx = context.Background() - ) - switch broadcastMethod { - case "async": - txres, err = c.BroadcastTxAsync(ctx, tx) - case "sync": - txres, err = c.BroadcastTxSync(ctx, tx) - default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) - } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) - } - }() - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) - - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok) - - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -// Test HTTPClient resubscribes upon disconnect && subscription error. -// Test Local client resubscribes upon subscription error. -func TestClientsResubscribe(t *testing.T) { - // TODO(melekes) -} - -func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // on Subscribe - _, err := c.Subscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on Unsubscribe - err = c.Unsubscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on UnsubscribeAll - err = c.UnsubscribeAll(ctx, "TestHeaderEvents") - assert.Error(t, err) + // and wait for confirmation + evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) + require.Nil(t, err) + + // and make sure it has the proper info + txe, ok := evt.(types.EventDataTx) + require.True(t, ok) + + // make sure this is the proper tx + require.EqualValues(t, tx, txe.Tx) + require.True(t, txe.Result.IsOK()) + <-done } diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 5626b7f48..ae4e29f52 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -1,17 +1,12 @@ package client_test import ( - "bytes" "context" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" @@ -113,64 +108,6 @@ func makeEvidences( return correct, fakes } -func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, config := NodeSuite(t) - chainID := config.ChainID() - - pv, err := privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) - require.NoError(t, err) - - for i, c := range GetClients(t, n, config) { - correct, fakes := makeEvidences(t, pv, chainID) - t.Logf("client %d", i) - - // make sure that the node has produced enough blocks - waitForBlock(ctx, t, c, 2) - - result, err := c.BroadcastEvidence(ctx, correct) - require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) - assert.Equal(t, correct.Hash(), result.Hash, 
"expected result hash to match evidence hash") - - status, err := c.Status(ctx) - require.NoError(t, err) - err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) - require.NoError(t, err) - - ed25519pub := pv.Key.PubKey.(ed25519.PubKey) - rawpub := ed25519pub.Bytes() - result2, err := c.ABCIQuery(ctx, "/val", rawpub) - require.NoError(t, err) - qres := result2.Response - require.True(t, qres.IsOK()) - - var v abci.ValidatorUpdate - err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) - require.NoError(t, err, "Error reading query result, value %v", qres.Value) - - pk, err := cryptoenc.PubKeyFromProto(v.PubKey) - require.NoError(t, err) - - require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) - require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) - - for _, fake := range fakes { - _, err := c.BroadcastEvidence(ctx, fake) - require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) - } - } -} - -func TestBroadcastEmptyEvidence(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - _, err := c.BroadcastEvidence(context.Background(), nil) - assert.Error(t, err) - } -} - func waitForBlock(ctx context.Context, t *testing.T, c client.Client, height int64) { timer := time.NewTimer(0 * time.Millisecond) defer timer.Stop() diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 8c4c4f277..8acf4b072 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,22 +3,27 @@ package client_test import ( "bytes" "context" - "fmt" "log" + "net/http" + "testing" + "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func ExampleHTTP_simple() { +func TestHTTPSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_simple") + conf, err := rpctest.CreateConfig("ExampleHTTP_simple") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -29,9 +34,7 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Create a transaction k := []byte("name") @@ -41,6 +44,7 @@ func ExampleHTTP_simple() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). 
bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(t, err) if err != nil { log.Fatal(err) } @@ -50,36 +54,26 @@ func ExampleHTTP_simple() { // Now try to fetch the value for the key qres, err := c.ABCIQuery(context.Background(), "/key", k) - if err != nil { - log.Fatal(err) - } - if qres.Response.IsErr() { - log.Fatal("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - log.Fatal("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - log.Fatal("returned value does not match sent value") - } + require.NoError(t, err) + require.False(t, qres.Response.IsErr(), "ABCIQuery failed") + require.True(t, bytes.Equal(qres.Response.Key, k), + "returned key does not match queried key") + require.True(t, bytes.Equal(qres.Response.Value, v), + "returned value does not match sent value [%s]", string(v)) - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi + assert.Equal(t, "name=satoshi", string(tx), "sent tx") + assert.Equal(t, "name", string(qres.Response.Key), "queried for") + assert.Equal(t, "satoshi", string(qres.Response.Value), "got value") } -func ExampleHTTP_batching() { +func TestHTTPBatching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_batching") + conf, err := rpctest.CreateConfig("ExampleHTTP_batching") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -88,10 +82,8 @@ func ExampleHTTP_batching() { defer func() { _ = closer(ctx) }() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) + require.NoError(t, err) // Create our two transactions k1 := []byte("firstName") @@ -111,41 +103,51 @@ func ExampleHTTP_batching() { for _, tx := range txs { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { - log.Fatal(err) - } + _, err := batch.BroadcastTxSync(ctx, tx) + require.NoError(t, err) } // Send the batch of 2 transactions - if _, err := batch.Send(context.Background()); err != nil { - log.Fatal(err) - } + _, err = batch.Send(ctx) + require.NoError(t, err) - // Now let's query for the original results as a batch - keys := [][]byte{k1, k2} - for _, key := range keys { - if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil { - log.Fatal(err) - } - } + // wait for the transaction to land, we could poll more for + // the transactions to land definitively. 
+ require.Eventually(t, + func() bool { + // Now let's query for the original results as a batch + exists := 0 + for _, key := range [][]byte{k1, k2} { + _, err := batch.ABCIQuery(context.Background(), "/key", key) + if err == nil { + exists++ + + } + } + return exists == 2 + }, + 10*time.Second, + time.Second, + ) // Send the 2 queries and keep the results - results, err := batch.Send(context.Background()) - if err != nil { - log.Fatal(err) - } + results, err := batch.Send(ctx) + require.NoError(t, err) + require.Len(t, results, 2) // Each result in the returned list is the deserialized result of each // respective ABCIQuery response for _, result := range results { - qr, ok := result.(*ctypes.ResultABCIQuery) - if !ok { - log.Fatal("invalid result type from ABCIQuery request") - } - fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) - } + qr, ok := result.(*coretypes.ResultABCIQuery) + require.True(t, ok, "invalid result type from ABCIQuery request") - // Output: - // firstName = satoshi - // lastName = nakamoto + switch string(qr.Response.Key) { + case "firstName": + require.Equal(t, "satoshi", string(qr.Response.Value)) + case "lastName": + require.Equal(t, "nakamoto", string(qr.Response.Value)) + default: + t.Fatalf("encountered unknown key %q", string(qr.Response.Key)) + } + } } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 49598e814..58e48dbba 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "sync" "time" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -77,8 +79,91 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( select { case event := <-eventCh: - return event.Data.(types.TMEventData), nil + return event.Data, nil case <-ctx.Done(): return nil, errors.New("timed out waiting for event") } } + +var ( + // ErrClientRunning is returned by Start when the client is already running. + ErrClientRunning = errors.New("client already running") + + // ErrClientNotRunning is returned by Stop when the client is not running. + ErrClientNotRunning = errors.New("client is not running") +) + +// RunState is a helper that a client implementation can embed to implement +// common plumbing for keeping track of run state and logging. +// +// TODO(creachadair): This type is a temporary measure, and will be removed. +// See the discussion on #6971. +type RunState struct { + Logger log.Logger + + mu sync.Mutex + name string + isRunning bool + quit chan struct{} +} + +// NewRunState returns a new unstarted run state tracker with the given logging +// label and log sink. If logger == nil, a no-op logger is provided by default. +func NewRunState(name string, logger log.Logger) *RunState { + if logger == nil { + logger = log.NewNopLogger() + } + return &RunState{ + name: name, + Logger: logger, + } +} + +// Start sets the state to running, or reports an error. +func (r *RunState) Start() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.isRunning { + r.Logger.Error("not starting client, it is already started", "client", r.name) + return ErrClientRunning + } + r.Logger.Info("starting client", "client", r.name) + r.isRunning = true + r.quit = make(chan struct{}) + return nil +} + +// Stop sets the state to not running, or reports an error. 
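A brief sketch of the intended embedding pattern for RunState; the myClient and demoRunState names are hypothetical and only illustrate the API added above:

	type myClient struct {
		*RunState // provides Start, Stop, IsRunning, Quit, and the Logger
	}

	func newMyClient(logger log.Logger) *myClient {
		return &myClient{RunState: NewRunState("myClient", logger)}
	}

	func demoRunState() {
		c := newMyClient(nil) // a nil logger is replaced by a no-op logger
		if err := c.Start(); err != nil {
			return // ErrClientRunning when already started
		}
		go func() { <-c.Quit() }() // goroutines can watch for shutdown
		_ = c.Stop()               // closes c.Quit(); a second Stop reports ErrClientNotRunning
	}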
+func (r *RunState) Stop() error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.isRunning { + r.Logger.Error("not stopping client; it is already stopped", "client", r.name) + return ErrClientNotRunning + } + r.Logger.Info("stopping client", "client", r.name) + r.isRunning = false + close(r.quit) + return nil +} + +// SetLogger updates the log sink. +func (r *RunState) SetLogger(logger log.Logger) { + r.mu.Lock() + defer r.mu.Unlock() + r.Logger = logger +} + +// IsRunning reports whether the state is running. +func (r *RunState) IsRunning() bool { + r.mu.Lock() + defer r.mu.Unlock() + return r.isRunning +} + +// Quit returns a channel that is closed when a call to Stop succeeds. +func (r *RunState) Quit() <-chan struct{} { + r.mu.Lock() + defer r.mu.Unlock() + return r.quit +} diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 3b78dfe5f..60732b991 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) func TestWaitForHeight(t *testing.T) { @@ -33,7 +33,7 @@ func TestWaitForHeight(t *testing.T) { // now set current block height to 10 m.Call = mock.Call{ - Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, + Response: &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 10}}, } // we will not wait for more than 10 blocks @@ -53,7 +53,7 @@ func TestWaitForHeight(t *testing.T) { // we use the callback to update the status height myWaiter := func(delta int64) error { // update the height for the next call - m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} + m.Call.Response = &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 15}} return client.DefaultWaitStrategy(delta) } @@ -65,13 +65,13 @@ func TestWaitForHeight(t *testing.T) { pre := r.Calls[3] require.Nil(pre.Error) - prer, ok := pre.Response.(*ctypes.ResultStatus) + prer, ok := pre.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] require.Nil(post.Error) - postr, ok := post.Response.(*ctypes.ResultStatus) + postr, ok := post.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 26a0ea5de..5bd7b398a 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -2,13 +2,13 @@ package http import ( "context" + "errors" "net/http" "time" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) @@ -121,20 +121,20 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { } // NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function panics when client is nil. +// on invalid remote. The function returns an error when client is nil +// or an invalid remote. 
func NewWithClient(remote string, c *http.Client) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) } // NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote. The function -// panics when client is nil. +// WebSocket options. An error is returned on invalid remote or nil client. func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } rpc, err := jsonrpcclient.NewWithHTTPClient(remote, c) if err != nil { @@ -158,11 +158,6 @@ func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*H var _ rpcclient.Client = (*HTTP)(nil) -// SetLogger sets a logger. -func (c *HTTP) SetLogger(l log.Logger) { - c.wsEvents.SetLogger(l) -} - // Remote returns the remote network address in a string form. func (c *HTTP) Remote() string { return c.remote @@ -204,8 +199,8 @@ func (b *BatchHTTP) Count() int { //----------------------------------------------------------------------------- // baseRPCClient -func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - result := new(ctypes.ResultStatus) +func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) { + result := new(coretypes.ResultStatus) _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) if err != nil { return nil, err @@ -214,8 +209,8 @@ func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error return result, nil } -func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - result := new(ctypes.ResultABCIInfo) +func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + result := new(coretypes.ResultABCIInfo) _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) if err != nil { return nil, err @@ -228,7 +223,7 @@ func (c *baseRPCClient) ABCIQuery( ctx context.Context, path string, data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } @@ -236,8 +231,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - result := new(ctypes.ResultABCIQuery) + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + result := new(coretypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) @@ -251,8 +246,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( func (c *baseRPCClient) BroadcastTxCommit( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTxCommit, error) { - result := new(ctypes.ResultBroadcastTxCommit) +) (*coretypes.ResultBroadcastTxCommit, error) { + result := new(coretypes.ResultBroadcastTxCommit) _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -263,14 +258,14 @@ func (c *baseRPCClient) BroadcastTxCommit( func (c *baseRPCClient) BroadcastTxAsync( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { +) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, 
"broadcast_tx_async", tx) } func (c *baseRPCClient) BroadcastTxSync( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { +) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } @@ -278,8 +273,8 @@ func (c *baseRPCClient) broadcastTX( ctx context.Context, route string, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { - result := new(ctypes.ResultBroadcastTx) +) (*coretypes.ResultBroadcastTx, error) { + result := new(coretypes.ResultBroadcastTx) _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -290,8 +285,8 @@ func (c *baseRPCClient) broadcastTX( func (c *baseRPCClient) UnconfirmedTxs( ctx context.Context, limit *int, -) (*ctypes.ResultUnconfirmedTxs, error) { - result := new(ctypes.ResultUnconfirmedTxs) +) (*coretypes.ResultUnconfirmedTxs, error) { + result := new(coretypes.ResultUnconfirmedTxs) params := make(map[string]interface{}) if limit != nil { params["limit"] = limit @@ -303,8 +298,8 @@ func (c *baseRPCClient) UnconfirmedTxs( return result, nil } -func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { - result := new(ctypes.ResultUnconfirmedTxs) +func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + result := new(coretypes.ResultUnconfirmedTxs) _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) if err != nil { return nil, err @@ -312,8 +307,8 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn return result, nil } -func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - result := new(ctypes.ResultCheckTx) +func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { + result := new(coretypes.ResultCheckTx) _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -321,8 +316,16 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.Resul return result, nil } -func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { - result := new(ctypes.ResultNetInfo) +func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { + _, err := c.caller.Call(ctx, "remove_tx", map[string]interface{}{"tx_key": txKey}, nil) + if err != nil { + return err + } + return nil +} + +func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { + result := new(coretypes.ResultNetInfo) _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) if err != nil { return nil, err @@ -330,8 +333,8 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, err return result, nil } -func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { - result := new(ctypes.ResultDumpConsensusState) +func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { + result := new(coretypes.ResultDumpConsensusState) _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) if err != nil { return nil, err @@ -339,8 +342,8 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultD return result, nil } -func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { - result := 
new(ctypes.ResultConsensusState) +func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { + result := new(coretypes.ResultConsensusState) _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) if err != nil { return nil, err @@ -351,8 +354,8 @@ func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConse func (c *baseRPCClient) ConsensusParams( ctx context.Context, height *int64, -) (*ctypes.ResultConsensusParams, error) { - result := new(ctypes.ResultConsensusParams) +) (*coretypes.ResultConsensusParams, error) { + result := new(coretypes.ResultConsensusParams) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -364,8 +367,8 @@ func (c *baseRPCClient) ConsensusParams( return result, nil } -func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { - result := new(ctypes.ResultHealth) +func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) { + result := new(coretypes.ResultHealth) _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) if err != nil { return nil, err @@ -377,8 +380,8 @@ func (c *baseRPCClient) BlockchainInfo( ctx context.Context, minHeight, maxHeight int64, -) (*ctypes.ResultBlockchainInfo, error) { - result := new(ctypes.ResultBlockchainInfo) +) (*coretypes.ResultBlockchainInfo, error) { + result := new(coretypes.ResultBlockchainInfo) _, err := c.caller.Call(ctx, "blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, result) @@ -388,8 +391,8 @@ func (c *baseRPCClient) BlockchainInfo( return result, nil } -func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { - result := new(ctypes.ResultGenesis) +func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { + result := new(coretypes.ResultGenesis) _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) if err != nil { return nil, err @@ -397,8 +400,8 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, err return result, nil } -func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { - result := new(ctypes.ResultGenesisChunk) +func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { + result := new(coretypes.ResultGenesisChunk) _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) if err != nil { return nil, err @@ -406,8 +409,8 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.Re return result, nil } -func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) +func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + result := new(coretypes.ResultBlock) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -419,8 +422,8 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul return result, nil } -func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) +func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { + result := new(coretypes.ResultBlock) params := map[string]interface{}{ "hash": hash, } @@ 
-434,8 +437,8 @@ func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (* func (c *baseRPCClient) BlockResults( ctx context.Context, height *int64, -) (*ctypes.ResultBlockResults, error) { - result := new(ctypes.ResultBlockResults) +) (*coretypes.ResultBlockResults, error) { + result := new(coretypes.ResultBlockResults) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -447,8 +450,8 @@ func (c *baseRPCClient) BlockResults( return result, nil } -func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { - result := new(ctypes.ResultCommit) +func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + result := new(coretypes.ResultCommit) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -460,8 +463,8 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu return result, nil } -func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) { - result := new(ctypes.ResultTx) +func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { + result := new(coretypes.ResultTx) params := map[string]interface{}{ "hash": hash, "prove": prove, @@ -480,9 +483,9 @@ func (c *baseRPCClient) TxSearch( page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { - result := new(ctypes.ResultTxSearch) + result := new(coretypes.ResultTxSearch) params := map[string]interface{}{ "query": query, "prove": prove, @@ -509,9 +512,9 @@ func (c *baseRPCClient) BlockSearch( query string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { +) (*coretypes.ResultBlockSearch, error) { - result := new(ctypes.ResultBlockSearch) + result := new(coretypes.ResultBlockSearch) params := map[string]interface{}{ "query": query, "order_by": orderBy, @@ -537,8 +540,8 @@ func (c *baseRPCClient) Validators( height *int64, page, perPage *int, -) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) +) (*coretypes.ResultValidators, error) { + result := new(coretypes.ResultValidators) params := make(map[string]interface{}) if page != nil { params["page"] = page @@ -559,8 +562,8 @@ func (c *baseRPCClient) Validators( func (c *baseRPCClient) BroadcastEvidence( ctx context.Context, ev types.Evidence, -) (*ctypes.ResultBroadcastEvidence, error) { - result := new(ctypes.ResultBroadcastEvidence) +) (*coretypes.ResultBroadcastEvidence, error) { + result := new(coretypes.ResultBroadcastEvidence) _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) if err != nil { return nil, err diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index afdaec861..e4c2a14ed 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -9,15 +9,12 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmjson "github.com/tendermint/tendermint/libs/json" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/libs/pubsub" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) -var errNotRunning = errors.New("client is not running. 
Use .Start() method to start") - // WSOptions for the WS part of the HTTP client. type WSOptions struct { Path string // path (e.g. "/ws") @@ -48,7 +45,7 @@ func (wso WSOptions) Validate() error { // wsEvents is a wrapper around WSClient, which implements EventsClient. type wsEvents struct { - service.BaseService + *rpcclient.RunState ws *jsonrpcclient.WSClient mtx tmsync.RWMutex @@ -56,7 +53,7 @@ type wsEvents struct { } type wsSubscription struct { - res chan ctypes.ResultEvent + res chan coretypes.ResultEvent id string query string } @@ -78,7 +75,7 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { w := &wsEvents{ subscriptions: make(map[string]*wsSubscription), } - w.BaseService = *service.NewBaseService(nil, "wsEvents", w) + w.RunState = rpcclient.NewRunState("wsEvents", nil) var err error w.ws, err = jsonrpcclient.NewWSWithOptions(remote, wso.Path, wso.WSOptions) @@ -94,23 +91,20 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { return w, nil } -// OnStart implements service.Service by starting WSClient and event loop. -func (w *wsEvents) OnStart() error { +// Start starts the websocket client and the event loop. +func (w *wsEvents) Start(ctx context.Context) error { if err := w.ws.Start(); err != nil { return err } - go w.eventListener() - return nil } -// OnStop implements service.Service by stopping WSClient. -func (w *wsEvents) OnStop() { - if err := w.ws.Stop(); err != nil { - w.Logger.Error("Can't stop ws client", "err", err) - } -} +// IsRunning reports whether the websocket client is running. +func (w *wsEvents) IsRunning() bool { return w.ws.IsRunning() } + +// Stop shuts down the websocket client. +func (w *wsEvents) Stop() error { return w.ws.Stop() } // Subscribe implements EventsClient by using WSClient to subscribe given // subscriber to query. By default, it returns a channel with cap=1. Error is @@ -125,10 +119,10 @@ func (w *wsEvents) OnStop() { // // It returns an error if wsEvents is not running. func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { if !w.IsRunning() { - return nil, errNotRunning + return nil, rpcclient.ErrClientNotRunning } if err := w.ws.Subscribe(ctx, query); err != nil { @@ -140,7 +134,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCap = outCapacity[0] } - outc := make(chan ctypes.ResultEvent, outCap) + outc := make(chan coretypes.ResultEvent, outCap) w.mtx.Lock() defer w.mtx.Unlock() // subscriber param is ignored because Tendermint will override it with @@ -156,7 +150,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, // It returns an error if wsEvents is not running. func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.Unsubscribe(ctx, query); err != nil { @@ -182,7 +176,7 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // It returns an error if wsEvents is not running. 
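A sketch of the subscription flow these methods support, as seen from the HTTP client that embeds wsEvents; the watchNewBlocks helper, the query string, and the capacity are illustrative assumptions:

	func watchNewBlocks(ctx context.Context, c *HTTP) error {
		const query = `tm.event = 'NewBlock'`

		// Capacity 8 overrides the default channel cap of 1 described above.
		events, err := c.Subscribe(ctx, "watcher", query, 8)
		if err != nil {
			return err // e.g. rpcclient.ErrClientNotRunning before Start
		}
		defer func() { _ = c.Unsubscribe(ctx, "watcher", query) }()

		for {
			select {
			case ev := <-events:
				_ = ev // a coretypes.ResultEvent for each matching event
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}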
func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.UnsubscribeAll(ctx); err != nil { @@ -219,7 +213,7 @@ func (w *wsEvents) redoSubscriptionsAfter(d time.Duration) { } func isErrAlreadySubscribed(err error) bool { - return strings.Contains(err.Error(), tmpubsub.ErrAlreadySubscribed.Error()) + return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error()) } func (w *wsEvents) eventListener() { @@ -244,7 +238,7 @@ func (w *wsEvents) eventListener() { continue } - result := new(ctypes.ResultEvent) + result := new(coretypes.ResultEvent) err := tmjson.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 8244e9295..8d160b799 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -24,17 +24,26 @@ import ( "context" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) //go:generate ../../scripts/mockery_generate.sh Client -// Client wraps most important rpc calls a client would make if you want to -// listen for events, test if it also implements events.EventSwitch. +// Client describes the interface of Tendermint RPC client implementations. type Client interface { - service.Service + // These methods define the operational structure of the client. + + // Start the client. Start must report an error if the client is running. + Start(context.Context) error + + // Stop the client. Stop must report an error if the client is not running. + Stop() error + + // IsRunning reports whether the client is running. + IsRunning() bool + + // These embedded interfaces define the callable methods of the service. ABCIClient EventsClient HistoryClient @@ -52,26 +61,26 @@ type Client interface { // is easier to mock. type ABCIClient interface { // Reading from abci app - ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) - ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) + ABCIInfo(context.Context) (*coretypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) + opts ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxCommit(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) + BroadcastTxSync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) } // SignClient groups together the functionality needed to get valid signatures // and prove anything about the chain. 
type SignClient interface { - Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) - BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) - BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) - Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) - Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) - Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) + Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) + BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) + BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) + Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) + Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) + Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) // TxSearch defines a method to search for a paginated set of transactions by // DeliverTx event search criteria. @@ -81,7 +90,7 @@ type SignClient interface { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultTxSearch, error) + ) (*coretypes.ResultTxSearch, error) // BlockSearch defines a method to search for a paginated set of blocks by // BeginBlock and EndBlock event search criteria. @@ -90,29 +99,29 @@ type SignClient interface { query string, page, perPage *int, orderBy string, - ) (*ctypes.ResultBlockSearch, error) + ) (*coretypes.ResultBlockSearch, error) } // HistoryClient provides access to data from genesis to now in large chunks. type HistoryClient interface { - Genesis(context.Context) (*ctypes.ResultGenesis, error) - GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error) - BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) + Genesis(context.Context) (*coretypes.ResultGenesis, error) + GenesisChunked(context.Context, uint) (*coretypes.ResultGenesisChunk, error) + BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) } // StatusClient provides access to general chain info. type StatusClient interface { - Status(context.Context) (*ctypes.ResultStatus, error) + Status(context.Context) (*coretypes.ResultStatus, error) } // NetworkClient is general info about the network state. May not be needed // usually. type NetworkClient interface { - NetInfo(context.Context) (*ctypes.ResultNetInfo, error) - DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) - ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) - ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) - Health(context.Context) (*ctypes.ResultHealth, error) + NetInfo(context.Context) (*coretypes.ResultNetInfo, error) + DumpConsensusState(context.Context) (*coretypes.ResultDumpConsensusState, error) + ConsensusState(context.Context) (*coretypes.ResultConsensusState, error) + ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) + Health(context.Context) (*coretypes.ResultHealth, error) } // EventsClient is reactive, you can subscribe to any message, given the proper @@ -125,7 +134,7 @@ type EventsClient interface { // // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe // or UnsubscribeAll. 
- Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) + Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) //nolint:lll // Unsubscribe unsubscribes given subscriber from query. Unsubscribe(ctx context.Context, subscriber, query string) error // UnsubscribeAll unsubscribes given subscriber from all the queries. @@ -134,15 +143,16 @@ type EventsClient interface { // MempoolClient shows us data about current mempool state. type MempoolClient interface { - UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) - NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) - CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error) + UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error) + CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error) + RemoveTx(context.Context, types.TxKey) error } // EvidenceClient is used for submitting an evidence of the malicious // behavior. type EvidenceClient interface { - BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error) + BroadcastEvidence(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error) } // RemoteClient is a Client, which can also return the remote network address. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 39c4295ac..cb5a0a5ed 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -6,13 +6,14 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/internal/eventbus" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" - rpccore "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -38,7 +39,7 @@ don't need to do anything). It will keep trying indefinitely with exponential backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { - *types.EventBus + *eventbus.EventBus Logger log.Logger ctx *rpctypes.Context env *rpccore.Environment @@ -48,7 +49,7 @@ type Local struct { // local RPC client constructor needs to build a local client. type NodeService interface { RPCEnvironment() *rpccore.Environment - EventBus() *types.EventBus + EventBus() *eventbus.EventBus } // New configures a client that calls the Node directly. 
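MempoolClient grows a RemoveTx method here, which the Local client below forwards to the mempool's RemoveTxByKey. A sketch of the call pattern, under the assumption that types.Tx exposes a Key() producing the types.TxKey the mempool indexes by:

package example

import (
	"context"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// broadcastThenEvict submits a transaction without waiting for commit and
// later evicts it from the node's mempool using the new RemoveTx method.
func broadcastThenEvict(ctx context.Context, c client.Client, tx types.Tx) error {
	if _, err := c.BroadcastTxAsync(ctx, tx); err != nil {
		return err
	}
	return c.RemoveTx(ctx, tx.Key()) // tx.Key() is assumed, per the note above
}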
@@ -72,15 +73,15 @@ func (c *Local) SetLogger(l log.Logger) { c.Logger = l } -func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.env.Status(c.ctx) } -func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.env.ABCIInfo(c.ctx) } -func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } @@ -88,208 +89,194 @@ func (c *Local) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(c.ctx, tx) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } -func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.env.NumUnconfirmedTxs(c.ctx) } -func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.env.CheckTx(c.ctx, tx) } -func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { + return c.env.Mempool.RemoveTxByKey(txKey) +} + +func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.env.NetInfo(c.ctx) } -func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(c.ctx) } -func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Local) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { return c.env.GetConsensusState(c.ctx) } -func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Local) ConsensusParams(ctx context.Context, 
height *int64) (*coretypes.ResultConsensusParams, error) { return c.env.ConsensusParams(c.ctx, height) } -func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(c.ctx, seeds) -} - -func (c *Local) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*ctypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) -} - -func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.env.Genesis(c.ctx) } -func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Local) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { return c.env.GenesisChunked(c.ctx, id) } -func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { return c.env.Block(c.ctx, height) } -func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) { +func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { return c.env.BlockByHash(c.ctx, hash) } -func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { return c.env.BlockResults(c.ctx, height) } -func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { return c.env.Commit(c.ctx, height) } -func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll return c.env.Validators(c.ctx, height, page, perPage) } -func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) { +func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { return c.env.Tx(c.ctx, hash, prove) } func (c *Local) TxSearch( _ context.Context, - query string, + queryString string, prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { - return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy) +) (*coretypes.ResultTxSearch, error) { + return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy) } func (c *Local) BlockSearch( _ context.Context, - query string, + queryString string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { - return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) +) (*coretypes.ResultBlockSearch, error) 
{ + return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy) } -func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(c.ctx, ev) } func (c *Local) Subscribe( ctx context.Context, subscriber, - query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - q, err := tmquery.New(query) + queryString string, + capacity ...int) (out <-chan coretypes.ResultEvent, err error) { + q, err := query.New(queryString) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } - outCap := 1 - if len(outCapacity) > 0 { - outCap = outCapacity[0] + limit, quota := 1, 0 + if len(capacity) > 0 { + limit = capacity[0] + if len(capacity) > 1 { + quota = capacity[1] + } } - var sub types.Subscription - if outCap > 0 { - sub, err = c.EventBus.Subscribe(ctx, subscriber, q, outCap) - } else { - sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) + ctx, cancel := context.WithCancel(ctx) + go func() { <-c.Quit(); cancel() }() + + subArgs := pubsub.SubscribeArgs{ + ClientID: subscriber, + Query: q, + Quota: quota, + Limit: limit, } + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err != nil { return nil, fmt.Errorf("failed to subscribe: %w", err) } - outc := make(chan ctypes.ResultEvent, outCap) - go c.eventsRoutine(sub, subscriber, q, outc) + outc := make(chan coretypes.ResultEvent, 1) + go c.eventsRoutine(ctx, sub, subArgs, outc) return outc, nil } func (c *Local) eventsRoutine( - sub types.Subscription, - subscriber string, - q tmpubsub.Query, - outc chan<- ctypes.ResultEvent) { + ctx context.Context, + sub eventbus.Subscription, + subArgs pubsub.SubscribeArgs, + outc chan<- coretypes.ResultEvent, +) { + qstr := subArgs.Query.String() for { - select { - case msg := <-sub.Out(): - result := ctypes.ResultEvent{ - SubscriptionID: msg.SubscriptionID(), - Query: q.String(), - Data: msg.Data(), - Events: msg.Events(), + msg, err := sub.Next(ctx) + if errors.Is(err, pubsub.ErrUnsubscribed) { + return // client unsubscribed + } else if err != nil { + c.Logger.Error("subscription was canceled, resubscribing", + "err", err, "query", subArgs.Query.String()) + sub = c.resubscribe(ctx, subArgs) + if sub == nil { + return // client terminated } - - if cap(outc) == 0 { - outc <- result - } else { - select { - case outc <- result: - default: - c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) - } - } - case <-sub.Canceled(): - if sub.Err() == tmpubsub.ErrUnsubscribed { - return - } - - c.Logger.Error("subscription was canceled, resubscribing...", "err", sub.Err(), "query", q.String()) - sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped - return - } - case <-c.Quit(): - return + continue + } + outc <- coretypes.ResultEvent{ + SubscriptionID: msg.SubscriptionID(), + Query: qstr, + Data: msg.Data(), + Events: msg.Events(), } } } // Try to resubscribe with exponential backoff. 
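In the rewritten Subscribe above, the variadic capacity argument now carries up to two values, a limit (default 1) and a quota (default 0), which are handed to the event bus through pubsub.SubscribeArgs, and eventsRoutine pulls messages with sub.Next rather than a select loop. A sketch of a subscriber passing both values; the subscriber name, query, and handler are placeholders:

package example

import (
	"context"

	rpclocal "github.com/tendermint/tendermint/rpc/client/local"
	"github.com/tendermint/tendermint/rpc/coretypes"
)

// consumeTxEvents subscribes with an explicit (limit, quota) pair and
// drains events until the context ends.
func consumeTxEvents(ctx context.Context, c *rpclocal.Local, handle func(coretypes.ResultEvent)) error {
	events, err := c.Subscribe(ctx, "indexer", "tm.event = 'Tx'", 100, 10)
	if err != nil {
		return err
	}
	for {
		select {
		case ev := <-events:
			handle(ev)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}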
-func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription { +func (c *Local) resubscribe(ctx context.Context, subArgs pubsub.SubscribeArgs) eventbus.Subscription { attempts := 0 for { if !c.IsRunning() { return nil } - sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q) + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err == nil { return sub } @@ -299,17 +286,17 @@ func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscript } } -func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber} +func (c *Local) Unsubscribe(ctx context.Context, subscriber, queryString string) error { + args := pubsub.UnsubscribeArgs{Subscriber: subscriber} var err error - args.Query, err = tmquery.New(query) + args.Query, err = query.New(queryString) if err != nil { // if this isn't a valid query it might be an ID, so // we'll try that. It'll turn into an error when we // try to unsubscribe. Eventually, perhaps, we'll want // to change the interface to only allow // unsubscription by ID, but that's a larger change. - args.ID = query + args.ID = queryString } return c.EventBus.Unsubscribe(ctx, args) } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index 4e2c0405c..5ae9b951c 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -3,10 +3,10 @@ package client_test import ( "context" "fmt" - "io/ioutil" "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" @@ -19,10 +19,11 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { ctx, cancel := context.WithCancel(context.Background()) - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against - dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) + dir, err := os.MkdirTemp("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) require.NoError(t, err) app := kvstore.NewPersistentKVStoreApplication(dir) @@ -30,9 +31,10 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { - _ = closer(ctx) cancel() - app.Close() + assert.NoError(t, closer(ctx)) + assert.NoError(t, app.Close()) + node.Wait() _ = os.RemoveAll(dir) }) return node, conf diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 0737deec0..700b08f5e 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -4,10 +4,10 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -24,11 +24,11 @@ var ( _ client.ABCIClient = (*ABCIRecorder)(nil) ) -func (a ABCIApp) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil +func (a ABCIApp) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + return &coretypes.ResultABCIInfo{Response: 
a.App.Info(proxy.RequestInfo)}, nil } -func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -36,21 +36,21 @@ func (a ABCIApp) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { q := a.App.Query(abci.RequestQuery{ Data: data, Path: path, Height: opts.Height, Prove: opts.Prove, }) - return &ctypes.ResultABCIQuery{Response: q}, nil + return &coretypes.ResultABCIQuery{Response: q}, nil } // NOTE: Caller should call a.App.Commit() separately, // this function does not actually wait for a commit. // TODO: Make it wait for a commit and set res.Height appropriately. -func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res := ctypes.ResultBroadcastTxCommit{} +func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + res := coretypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) if res.CheckTx.IsErr() { return &res, nil @@ -60,13 +60,13 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re return &res, nil } -func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } - return &ctypes.ResultBroadcastTx{ + return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, Log: c.Log, @@ -75,13 +75,13 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res }, nil } -func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... 
if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } - return &ctypes.ResultBroadcastTx{ + return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, Log: c.Log, @@ -100,15 +100,15 @@ type ABCIMock struct { Broadcast Call } -func (m ABCIMock) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (m ABCIMock) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { res, err := m.Info.GetResponse(nil) if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil + return &coretypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil } -func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -116,37 +116,37 @@ func (m ABCIMock) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err } resQuery := res.(abci.ResponseQuery) - return &ctypes.ResultABCIQuery{Response: resQuery}, nil + return &coretypes.ResultABCIQuery{Response: resQuery}, nil } -func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { res, err := m.BroadcastCommit.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTxCommit), nil + return res.(*coretypes.ResultBroadcastTxCommit), nil } -func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTx), nil + return res.(*coretypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTx), nil + return res.(*coretypes.ResultBroadcastTx), nil } // ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client) @@ -174,7 +174,7 @@ func (r *ABCIRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { res, err := r.Client.ABCIInfo(ctx) r.addCall(Call{ Name: "abci_info", @@ -188,7 +188,7 @@ func (r *ABCIRecorder) ABCIQuery( ctx context.Context, path string, data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -196,7 +196,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, 
error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := r.Client.ABCIQueryWithOptions(ctx, path, data, opts) r.addCall(Call{ Name: "abci_query", @@ -207,7 +207,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( return res, err } -func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { res, err := r.Client.BroadcastTxCommit(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_commit", @@ -218,7 +218,7 @@ func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*cty return res, err } -func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxAsync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_async", @@ -229,7 +229,7 @@ func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctyp return res, err } -func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxSync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_sync", diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index d164b275a..25fbbc05d 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -36,7 +36,7 @@ func TestABCIMock(t *testing.T) { // Broadcast commit depends on call BroadcastCommit: mock.Call{ Args: goodTx, - Response: &ctypes.ResultBroadcastTxCommit{ + Response: &coretypes.ResultBroadcastTxCommit{ CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")}, }, @@ -112,7 +112,7 @@ func TestABCIRecorder(t *testing.T) { assert.Nil(info.Error) assert.Nil(info.Args) require.NotNil(info.Response) - ir, ok := info.Response.(*ctypes.ResultABCIInfo) + ir, ok := info.Response.(*coretypes.ResultABCIInfo) require.True(ok) assert.Equal("data", ir.Response.Data) assert.Equal("v0.9.9", ir.Response.Version) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 8ff474dd5..a1a42e28d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -18,26 +18,17 @@ import ( "context" "reflect" + "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // Client wraps arbitrary implementations of the various interfaces. 
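In the hunk that follows, mock.Client swaps its seven embedded per-area interfaces plus service.Service for a single embedded client.Client. One consequence worth showing, as a sketch of the standard Go embedding pattern rather than anything this diff itself adds: a test double can embed a nil client.Client, override only the method under test, and still satisfy the whole interface, panicking loudly if anything else is called.

package example

import (
	"context"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/rpc/coretypes"
)

// statusStub overrides Status; every other method hits the nil embedded
// interface and panics, keeping tests honest about what they exercise.
type statusStub struct {
	client.Client
}

func (statusStub) Status(context.Context) (*coretypes.ResultStatus, error) {
	return &coretypes.ResultStatus{}, nil
}

var _ client.Client = statusStub{}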
type Client struct { - client.ABCIClient - client.SignClient - client.HistoryClient - client.StatusClient - client.EventsClient - client.EvidenceClient - client.MempoolClient - service.Service - + client.Client env *core.Environment } @@ -84,15 +75,15 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { return nil, c.Error } -func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.env.Status(&rpctypes.Context{}) } -func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.env.ABCIInfo(&rpctypes.Context{}) } -func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -100,84 +91,70 @@ func (c Client) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } -func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.env.CheckTx(&rpctypes.Context{}, tx) } -func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.env.NetInfo(&rpctypes.Context{}) } -func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { return c.env.GetConsensusState(&rpctypes.Context{}) } -func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(&rpctypes.Context{}) } -func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { return c.env.ConsensusParams(&rpctypes.Context{}, height) } -func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c Client) 
Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*ctypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) -} - -func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } -func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.env.Genesis(&rpctypes.Context{}) } -func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { return c.env.Block(&rpctypes.Context{}, height) } -func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) { +func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { return c.env.BlockByHash(&rpctypes.Context{}, hash) } -func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { return c.env.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 6dd6a8d44..22548e891 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -4,7 +4,7 @@ import ( "context" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) // StatusMock returns the result specified by the Call @@ -17,12 +17,12 @@ var ( _ client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (m *StatusMock) Status(ctx context.Context) (*coretypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err } - return res.(*ctypes.ResultStatus), nil + return res.(*coretypes.ResultStatus), nil } // StatusRecorder can wrap another type (StatusMock, full client) @@ -43,7 +43,7 @@ func (r *StatusRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *StatusRecorder) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (r *StatusRecorder) Status(ctx context.Context) (*coretypes.ResultStatus, 
error) { res, err := r.Client.Status(ctx) r.addCall(Call{ Name: "status", diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 3933c33c9..98655280e 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) func TestStatus(t *testing.T) { @@ -18,14 +18,21 @@ func TestStatus(t *testing.T) { m := &mock.StatusMock{ Call: mock.Call{ - Response: &ctypes.ResultStatus{ - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: bytes.HexBytes("block"), - LatestAppHash: bytes.HexBytes("app"), - LatestBlockHeight: 10, - MaxPeerBlockHeight: 20, - TotalSyncedTime: time.Second, - RemainingTime: time.Minute, + Response: &coretypes.ResultStatus{ + SyncInfo: coretypes.SyncInfo{ + LatestBlockHash: bytes.HexBytes("block"), + LatestAppHash: bytes.HexBytes("app"), + LatestBlockHeight: 10, + MaxPeerBlockHeight: 20, + TotalSyncedTime: time.Second, + RemainingTime: time.Minute, + TotalSnapshots: 10, + ChunkProcessAvgTime: time.Duration(10), + SnapshotHeight: 10, + SnapshotChunksCount: 9, + SnapshotChunksTotal: 10, + BackFilledBlocks: 9, + BackFillBlocksTotal: 10, }, }}, } @@ -49,11 +56,19 @@ func TestStatus(t *testing.T) { assert.Nil(rs.Args) assert.Nil(rs.Error) require.NotNil(rs.Response) - st, ok := rs.Response.(*ctypes.ResultStatus) + st, ok := rs.Response.(*coretypes.ResultStatus) require.True(ok) assert.EqualValues("block", st.SyncInfo.LatestBlockHash) assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight) assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + + assert.EqualValues(10, st.SyncInfo.TotalSnapshots) + assert.EqualValues(time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) + assert.EqualValues(10, st.SyncInfo.SnapshotHeight) + assert.EqualValues(9, status.SyncInfo.SnapshotChunksCount) + assert.EqualValues(10, status.SyncInfo.SnapshotChunksTotal) + assert.EqualValues(9, status.SyncInfo.BackFilledBlocks) + assert.EqualValues(10, status.SyncInfo.BackFillBlocksTotal) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 8e4c7cbf5..7012e1c2d 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -8,9 +8,7 @@ import ( context "context" - coretypes "github.com/tendermint/tendermint/rpc/core/types" - - log "github.com/tendermint/tendermint/libs/log" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" mock "github.com/stretchr/testify/mock" @@ -542,13 +540,13 @@ func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUncon return r0, r1 } -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() +// RemoveTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { + ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) } @@ -556,67 +554,13 @@ func (_m *Client) OnReset() error { return r0 } -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() +// Start provides a mock function with given 
fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Client) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -661,20 +605,6 @@ func (_m *Client) Stop() error { return r0 } -// String provides a mock function with given fields: -func (_m *Client) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) @@ -824,8 +754,3 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } - -// Wait provides a mock function with given fields: -func (_m *Client) Wait() { - _m.Called() -} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f8962fb35..12c13d686 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,6 +1,7 @@ package client_test import ( + "bytes" "context" "encoding/base64" "fmt" @@ -16,15 +17,18 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/mempool" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) @@ -33,10 +37,16 @@ func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) 
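The regenerated testify mock above drops the service.Service surface (OnStart, OnStop, Quit, Reset, SetLogger, String, Wait) and gains a context parameter on Start along with the new RemoveTx. A sketch of wiring expectations against it with the usual testify helpers; the test name is illustrative:

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/rpc/client/mocks"
	"github.com/tendermint/tendermint/types"
)

func TestMockedStartAndRemoveTx(t *testing.T) {
	m := new(mocks.Client)
	m.On("Start", mock.Anything).Return(nil)
	m.On("RemoveTx", mock.Anything, mock.Anything).Return(nil)

	ctx := context.Background()
	require.NoError(t, m.Start(ctx))
	require.NoError(t, m.RemoveTx(ctx, types.TxKey{}))
	m.AssertExpectations(t)
}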
require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) + return c } @@ -44,10 +54,18 @@ func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Du t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.NewWithTimeout(rpcAddr, timeout) + + http.DefaultClient.Timeout = timeout + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + http.DefaultClient.Timeout = 0 + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) return c } @@ -63,707 +81,758 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. require.NoError(t, err) return []client.Client{ - getHTTPClient(t, conf), ncl, + getHTTPClient(t, conf), } } -func TestNilCustomHTTPClient(t *testing.T) { - require.Panics(t, func() { - _, _ = rpchttp.NewWithClient("http://example.com", nil) - }) - require.Panics(t, func() { - _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) - }) -} - -func TestParseInvalidAddress(t *testing.T) { - _, conf := NodeSuite(t) - // should remove trailing / - invalidRemote := conf.RPC.ListenAddress + "/" - _, err := rpchttp.New(invalidRemote) - require.NoError(t, err) -} - -func TestCustomHTTPClient(t *testing.T) { - _, conf := NodeSuite(t) - remote := conf.RPC.ListenAddress - c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) - status, err := c.Status(context.Background()) - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - _, conf := NodeSuite(t) - origin := conf.RPC.CORSAllowedOrigins[0] - remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { +func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - moniker := conf.Moniker + _, conf := NodeSuite(t) + + t.Run("NilCustomHTTPClient", func(t *testing.T) { + _, err := rpchttp.NewWithClient("http://example.com", nil) + require.Error(t, err) + + _, err = rpcclient.NewWithHTTPClient("http://example.com", nil) + require.Error(t, err) + }) + t.Run("ParseInvalidAddress", func(t *testing.T) { + // should remove trailing / + invalidRemote := conf.RPC.ListenAddress + "/" + _, err := rpchttp.New(invalidRemote) + require.NoError(t, err) + }) + t.Run("CustomHTTPClient", func(t *testing.T) { + remote := conf.RPC.ListenAddress + c, err := rpchttp.NewWithClient(remote, http.DefaultClient) + require.Nil(t, err) status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } + require.NoError(t, err) + require.NotNil(t, status) + }) + t.Run("CorsEnabled", func(t *testing.T) { + origin := conf.RPC.CORSAllowedOrigins[0] + remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") + + req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) + require.Nil(t, err, "%+v", err) + req.Header.Set("Origin", origin) + resp, err := http.DefaultClient.Do(req) + 
require.Nil(t, err, "%+v", err) + defer resp.Body.Close() + + assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) + }) + t.Run("Batching", func(t *testing.T) { + t.Run("JSONRPCCalls", func(t *testing.T) { + c := getHTTPClient(t, conf) + testBatchedJSONRPCCalls(ctx, t, c) + }) + t.Run("JSONRPCCallsCancellation", func(t *testing.T) { + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(ctx, tx1) + require.NoError(t, err) + _, err = batch.BroadcastTxCommit(ctx, tx2) + require.NoError(t, err) + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) + }) + t.Run("SendingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.Send(ctx) + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") + }) + t.Run("ClearingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") + }) + t.Run("ConcurrentJSONRPC", func(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient(t, conf) + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(ctx, t, c) + }() + } + wg.Wait() + }) + }) + t.Run("HTTPReturnsErrorIfClientIsNotRunning", func(t *testing.T) { + c := getHTTPClientWithTimeout(t, conf, 100*time.Millisecond) + + // on Subscribe + _, err := c.Subscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on UnsubscribeAll + err = c.UnsubscribeAll(ctx, "TestHeaderEvents") + assert.Error(t, err) + }) } // Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { +func TestClientMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) + // for broadcast tx tests + pool := getMempool(t, n) + + // for evidence tests + pv, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + for i, c := range GetClients(t, n, conf) { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + t.Run("Status", func(t *testing.T) { + status, err := c.Status(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) + }) + t.Run("Info", func(t *testing.T) { + info, err := c.ABCIInfo(ctx) + require.NoError(t, err) + + status, err := c.Status(ctx) + require.NoError(t, err) + + assert.GreaterOrEqual(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + }) + t.Run("NetInfo", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + }) + t.Run("DumpConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + }) + t.Run("ConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + }) + t.Run("Health", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health(ctx) + require.Nil(t, err, "%d: %+v", i, err) + }) + t.Run("GenesisAndValidators", func(t *testing.T) { + // make sure this is the right genesis file + gen, err := c.Genesis(ctx) + require.Nil(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := c.Validators(ctx, &h, nil, nil) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + t.Run("GenesisChunked", func(t *testing.T) { + first, err := c.GenesisChunked(ctx, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := c.GenesisChunked(ctx, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) + + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) + t.Run("ABCIQuery", func(t *testing.T) { + // write something + k, v, tx := MakeTxKV() + status, err := c.Status(ctx) + require.NoError(t, err) + _, err = c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(ctx, "/key", k) + qres := 
res.Response + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + }) + t.Run("AppCalls", func(t *testing.T) { + // get an offset of height to avoid racing and guessing + s, err := c.Status(ctx) + require.NoError(t, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = c.Block(ctx, &h) + require.Error(t, err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + require.True(t, bres.DeliverTx.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + + _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(t, err) + qres := _qres.Response + if assert.True(t, qres.IsOK()) { + assert.Equal(t, k, qres.Key) + assert.EqualValues(t, v, qres.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := c.Tx(ctx, bres.Hash, true) + require.NoError(t, err) + assert.EqualValues(t, txh, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + + // and we can even check the block is added + block, err := c.Block(ctx, &apph) + require.NoError(t, err) + appHash := block.Block.Header.AppHash + assert.True(t, len(appHash) > 0) + assert.EqualValues(t, apph, block.Block.Header.Height) + + blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, block, blockByHash) + + // now check the results + blockResults, err := c.BlockResults(ctx, &txh) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, txh, blockResults.Height) + if assert.Equal(t, 1, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(ctx, apph, apph) + require.NoError(t, err) + assert.True(t, info.LastHeight >= apph) + if assert.Equal(t, 1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(t, apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(t, block.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := c.Commit(ctx, &apph) + require.NoError(t, err) + cappHash := commit.Header.AppHash + assert.Equal(t, appHash, cappHash) + assert.NotNil(t, commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(ctx, &h) + require.NoError(t, err) + assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! 
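+			// (note: Prove: true in the options below asks the node to also
+			// return a Merkle proof for the queried key; the Prove: false
+			// query earlier in this test returned only the stored value)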
+			_pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true})
+			require.NoError(t, err)
+			pres := _pres.Response
+			assert.True(t, pres.IsOK())
+
+			// XXX Test proof
+		})
+		t.Run("BlockchainInfo", func(t *testing.T) {
+			err := client.WaitForHeight(c, 10, nil)
+			require.NoError(t, err)
+
+			res, err := c.BlockchainInfo(ctx, 0, 0)
+			require.Nil(t, err, "%d: %+v", i, err)
+			assert.True(t, res.LastHeight > 0)
+			assert.True(t, len(res.BlockMetas) > 0)
+
+			res, err = c.BlockchainInfo(ctx, 1, 1)
+			require.Nil(t, err, "%d: %+v", i, err)
+			assert.True(t, res.LastHeight > 0)
+			assert.True(t, len(res.BlockMetas) == 1)
+
+			res, err = c.BlockchainInfo(ctx, 1, 10000)
+			require.Nil(t, err, "%d: %+v", i, err)
+			assert.True(t, res.LastHeight > 0)
+			assert.True(t, len(res.BlockMetas) < 100)
+			for _, m := range res.BlockMetas {
+				assert.NotNil(t, m)
+			}
+
+			res, err = c.BlockchainInfo(ctx, 10000, 1)
+			require.NotNil(t, err)
+			assert.Nil(t, res)
+			assert.Contains(t, err.Error(), "can't be greater than max")
+		})
+		t.Run("BroadcastTxCommit", func(t *testing.T) {
+			_, _, tx := MakeTxKV()
+			bres, err := c.BroadcastTxCommit(ctx, tx)
+			require.Nil(t, err, "%d: %+v", i, err)
+			require.True(t, bres.CheckTx.IsOK())
+			require.True(t, bres.DeliverTx.IsOK())
+
+			require.Equal(t, 0, pool.Size())
+		})
+		t.Run("BroadcastTxSync", func(t *testing.T) {
+			_, _, tx := MakeTxKV()
+			initMempoolSize := pool.Size()
+			bres, err := c.BroadcastTxSync(ctx, tx)
+			require.Nil(t, err, "%d: %+v", i, err)
+			require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME
+
+			require.Equal(t, initMempoolSize+1, pool.Size())
+
+			txs := pool.ReapMaxTxs(len(tx))
+			require.EqualValues(t, tx, txs[0])
+			pool.Flush()
+		})
+		t.Run("CheckTx", func(t *testing.T) {
+			_, _, tx := MakeTxKV()
+
+			res, err := c.CheckTx(ctx, tx)
+			require.NoError(t, err)
+			assert.Equal(t, abci.CodeTypeOK, res.Code)
+
+			assert.Equal(t, 0, pool.Size(), "mempool must be empty")
+		})
+		t.Run("Events", func(t *testing.T) {
+			// start the client for this test if it wasn't already running
+			if !c.IsRunning() {
+				// if it wasn't, start it now and stop it again when the test completes
+				err := c.Start(ctx)
+				require.Nil(t, err)
+				t.Cleanup(func() {
+					if err := c.Stop(); err != nil {
+						t.Error(err)
+					}
+				})
+			}
+
+			t.Run("Header", func(t *testing.T) {
+				evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout)
+				require.Nil(t, err, "%d: %+v", i, err)
+				_, ok := evt.(types.EventDataNewBlockHeader)
+				require.True(t, ok, "%d: %#v", i, evt)
+				// TODO: more checks...
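+				// (the event payload carries the new block's header, so
+				// further checks on fields such as Height or AppHash could
+				// be added here)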
+			})
+			t.Run("Block", func(t *testing.T) {
+				const subscriber = "TestBlockEvents"
+
+				eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String())
+				require.NoError(t, err)
+				t.Cleanup(func() {
+					if err := c.UnsubscribeAll(ctx, subscriber); err != nil {
+						t.Error(err)
+					}
+				})
+
+				var firstBlockHeight int64
+				for i := int64(0); i < 3; i++ {
+					event := <-eventCh
+					blockEvent, ok := event.Data.(types.EventDataNewBlock)
+					require.True(t, ok)
+
+					block := blockEvent.Block
+
+					if firstBlockHeight == 0 {
+						firstBlockHeight = block.Header.Height
+					}
+
+					require.Equal(t, firstBlockHeight+i, block.Header.Height)
+				}
+			})
+			t.Run("BroadcastTxAsync", func(t *testing.T) {
+				testTxEventsSent(ctx, t, "async", c)
+			})
+			t.Run("BroadcastTxSync", func(t *testing.T) {
+				testTxEventsSent(ctx, t, "sync", c)
+			})
+		})
+		t.Run("Evidence", func(t *testing.T) {
+			t.Run("BroadcastDuplicateVote", func(t *testing.T) {
+				chainID := conf.ChainID()
+
+				correct, fakes := makeEvidences(t, pv, chainID)
+
+				// make sure that the node has produced enough blocks
+				waitForBlock(ctx, t, c, 2)
+
+				result, err := c.BroadcastEvidence(ctx, correct)
+				require.NoError(t, err, "BroadcastEvidence(%s) failed", correct)
+				assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash")
+
+				status, err := c.Status(ctx)
+				require.NoError(t, err)
+				err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil)
+				require.NoError(t, err)
+
+				ed25519pub := pv.Key.PubKey.(ed25519.PubKey)
+				rawpub := ed25519pub.Bytes()
+				result2, err := c.ABCIQuery(ctx, "/val", rawpub)
+				require.NoError(t, err)
+				qres := result2.Response
+				require.True(t, qres.IsOK())
+
+				var v abci.ValidatorUpdate
+				err = abci.ReadMessage(bytes.NewReader(qres.Value), &v)
+				require.NoError(t, err, "Error reading query result, value %v", qres.Value)
+
+				pk, err := encoding.PubKeyFromProto(v.PubKey)
+				require.NoError(t, err)
+
+				require.EqualValues(t, rawpub, pk, "stored PubKey does not match expected, value %v", string(qres.Value))
+				require.Equal(t, int64(9), v.Power, "stored Power does not match expected, value %v", string(qres.Value))
+
+				for _, fake := range fakes {
+					_, err := c.BroadcastEvidence(ctx, fake)
+					require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake)
+				}
+
+			})
+			t.Run("BroadcastEmpty", func(t *testing.T) {
+				_, err := c.BroadcastEvidence(ctx, nil)
+				assert.Error(t, err)
+			})
+		})
+		})
 	}
 }

-func TestNetInfo(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	n, conf := NodeSuite(t)
-	for i, c := range GetClients(t, n, conf) {
-		nc, ok := c.(client.NetworkClient)
-		require.True(t, ok, "%d", i)
-		netinfo, err := nc.NetInfo(ctx)
-		require.Nil(t, err, "%d: %+v", i, err)
-		assert.True(t, netinfo.Listening)
-		assert.Equal(t, 0, len(netinfo.Peers))
-	}
-}
-
-func TestDumpConsensusState(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	n, conf := NodeSuite(t)
-	for i, c := range GetClients(t, n, conf) {
-		// FIXME: fix server so it doesn't panic on invalid input
-		nc, ok := c.(client.NetworkClient)
-		require.True(t, ok, "%d", i)
-		cons, err := nc.DumpConsensusState(ctx)
-		require.Nil(t, err, "%d: %+v", i, err)
-		assert.NotEmpty(t, cons.RoundState)
-		assert.Empty(t, cons.Peers)
-	}
-}
-
-func TestConsensusState(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	n, conf := NodeSuite(t)
-
-	for i, c := range GetClients(t, n, conf) {
- // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // make sure this is the right genesis file - gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - h := int64(1) - vals, err := c.Validators(ctx, &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - require.Equal(t, 1, vals.Count) - require.Equal(t, 1, vals.Total) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestGenesisChunked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for _, c := range GetClients(t, n, conf) { - first, err := c.GenesisChunked(ctx, 0) - require.NoError(t, err) - - decoded := make([]string, 0, first.TotalChunks) - for i := 0; i < first.TotalChunks; i++ { - chunk, err := c.GenesisChunked(ctx, uint(i)) - require.NoError(t, err) - data, err := base64.StdEncoding.DecodeString(chunk.Data) - require.NoError(t, err) - decoded = append(decoded, string(data)) - - } - doc := []byte(strings.Join(decoded, "")) - - var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), - "first: %+v, doc: %s", first, string(doc)) - } -} - -func TestABCIQuery(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - res, err := c.ABCIQuery(ctx, "/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } -} - -// Make some app checks -func TestAppCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - - // get an offset of height to avoid racing and guessing - s, err := c.Status(ctx) - require.NoError(t, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 20 - _, err = c.Block(ctx, &h) - require.Error(t, err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 
1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - - _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) - require.NoError(t, err) - qres := _qres.Response - if assert.True(t, qres.IsOK()) { - assert.Equal(t, k, qres.Key) - assert.EqualValues(t, v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(ctx, bres.Hash, true) - require.NoError(t, err) - assert.EqualValues(t, txh, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(ctx, &apph) - require.NoError(t, err) - appHash := block.Block.Header.AppHash - assert.True(t, len(appHash) > 0) - assert.EqualValues(t, apph, block.Block.Header.Height) - - blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, block, blockByHash) - - // now check the results - blockResults, err := c.BlockResults(ctx, &txh) - require.NoError(t, err, "%d: %+v", i, err) - assert.Equal(t, txh, blockResults.Height) - if assert.Equal(t, 1, len(blockResults.TxsResults)) { - // check success code - assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(ctx, apph, apph) - require.NoError(t, err) - assert.True(t, info.LastHeight >= apph) - if assert.Equal(t, 1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(t, apph, lastMeta.Header.Height) - blockData := block.Block - assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(t, block.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(ctx, &apph) - require.NoError(t, err) - cappHash := commit.Header.AppHash - assert.Equal(t, appHash, cappHash) - assert.NotNil(t, commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(ctx, &h) - require.NoError(t, err) - assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) - - // and we got a proof that works! 
-		_pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true})
-		require.NoError(t, err)
-		pres := _pres.Response
-		assert.True(t, pres.IsOK())
-
-		// XXX Test proof
-	}
-}
-
-func TestBlockchainInfo(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	n, conf := NodeSuite(t)
-
-	for i, c := range GetClients(t, n, conf) {
-		err := client.WaitForHeight(c, 10, nil)
-		require.NoError(t, err)
-
-		res, err := c.BlockchainInfo(ctx, 0, 0)
-		require.Nil(t, err, "%d: %+v", i, err)
-		assert.True(t, res.LastHeight > 0)
-		assert.True(t, len(res.BlockMetas) > 0)
-
-		res, err = c.BlockchainInfo(ctx, 1, 1)
-		require.Nil(t, err, "%d: %+v", i, err)
-		assert.True(t, res.LastHeight > 0)
-		assert.True(t, len(res.BlockMetas) == 1)
-
-		res, err = c.BlockchainInfo(ctx, 1, 10000)
-		require.Nil(t, err, "%d: %+v", i, err)
-		assert.True(t, res.LastHeight > 0)
-		assert.True(t, len(res.BlockMetas) < 100)
-		for _, m := range res.BlockMetas {
-			assert.NotNil(t, m)
-		}
-
-		res, err = c.BlockchainInfo(ctx, 10000, 1)
-		require.NotNil(t, err)
-		assert.Nil(t, res)
-		assert.Contains(t, err.Error(), "can't be greater than max")
-	}
-}
-
-func TestBroadcastTxSync(t *testing.T) {
-	n, conf := NodeSuite(t)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// TODO (melekes): use mempool which is set on RPC rather than getting it from node
-	mempool := getMempool(t, n)
-	initMempoolSize := mempool.Size()
-
-	for i, c := range GetClients(t, n, conf) {
-		_, _, tx := MakeTxKV()
-		bres, err := c.BroadcastTxSync(ctx, tx)
-		require.Nil(t, err, "%d: %+v", i, err)
-		require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME
-
-		require.Equal(t, initMempoolSize+1, mempool.Size())
-
-		txs := mempool.ReapMaxTxs(len(tx))
-		require.EqualValues(t, tx, txs[0])
-		mempool.Flush()
-	}
-}
-
-func getMempool(t *testing.T, srv service.Service) mempl.Mempool {
+func getMempool(t *testing.T, srv service.Service) mempool.Mempool {
 	t.Helper()
 	n, ok := srv.(interface {
-		Mempool() mempl.Mempool
+		Mempool() mempool.Mempool
 	})
 	require.True(t, ok)
 	return n.Mempool()
 }

-func TestBroadcastTxCommit(t *testing.T) {
+// These cases are roughly the same as TestClientMethodCalls, but they
+// have to loop over their clients inside the individual test cases, so
+// a separate suite makes more sense, though it isn't strictly
+// desirable.
+func TestClientMethodCallsAdvanced(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

 	n, conf := NodeSuite(t)
+	pool := getMempool(t, n)

-	mempool := getMempool(t, n)
-	for i, c := range GetClients(t, n, conf) {
+	t.Run("UnconfirmedTxs", func(t *testing.T) {
+		_, _, tx := MakeTxKV()
+		ch := make(chan struct{})
+
+		err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{})
+		require.NoError(t, err)
+
+		// wait for the tx to arrive in the mempool.
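+		// CheckTx runs asynchronously: the callback above closes ch once
+		// the mempool has processed the tx, so the select below either
+		// observes the close or fails the test after five seconds.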
+		select {
+		case <-ch:
+		case <-time.After(5 * time.Second):
+			t.Error("Timed out waiting for CheckTx callback")
+		}
+
+		for _, c := range GetClients(t, n, conf) {
+			mc := c.(client.MempoolClient)
+			limit := 1
+			res, err := mc.UnconfirmedTxs(ctx, &limit)
+			require.NoError(t, err)
+
+			assert.Equal(t, 1, res.Count)
+			assert.Equal(t, 1, res.Total)
+			assert.Equal(t, pool.SizeBytes(), res.TotalBytes)
+			assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs))
+		}
+
+		pool.Flush()
+	})
+	t.Run("NumUnconfirmedTxs", func(t *testing.T) {
+		ch := make(chan struct{})
+
+		pool := getMempool(t, n)
+
+		_, _, tx := MakeTxKV()
+
+		err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{})
+		require.NoError(t, err)
+
+		// wait for the tx to arrive in the mempool.
+		select {
+		case <-ch:
+		case <-time.After(5 * time.Second):
+			t.Error("Timed out waiting for CheckTx callback")
+		}
+
+		mempoolSize := pool.Size()
+		for i, c := range GetClients(t, n, conf) {
+			mc, ok := c.(client.MempoolClient)
+			require.True(t, ok, "%d", i)
+			res, err := mc.NumUnconfirmedTxs(ctx)
+			require.Nil(t, err, "%d: %+v", i, err)
+
+			assert.Equal(t, mempoolSize, res.Count)
+			assert.Equal(t, mempoolSize, res.Total)
+			assert.Equal(t, pool.SizeBytes(), res.TotalBytes)
+		}
+
+		pool.Flush()
+	})
+	t.Run("Tx", func(t *testing.T) {
+		c := getHTTPClient(t, conf)
+
+		// first we broadcast a tx
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxCommit(ctx, tx)
-		require.Nil(t, err, "%d: %+v", i, err)
-		require.True(t, bres.CheckTx.IsOK())
-		require.True(t, bres.DeliverTx.IsOK())
+		require.Nil(t, err, "%+v", err)

-		require.Equal(t, 0, mempool.Size())
-	}
-}
+		txHeight := bres.Height
+		txHash := bres.Hash

-func TestUnconfirmedTxs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+		anotherTxHash := types.Tx("a different tx").Hash()

-	_, _, tx := MakeTxKV()
-	ch := make(chan *abci.Response, 1)
+		cases := []struct {
+			valid bool
+			prove bool
+			hash  []byte
+		}{
+			// only valid if correct hash provided
+			{true, false, txHash},
+			{true, true, txHash},
+			{false, false, anotherTxHash},
+			{false, true, anotherTxHash},
+			{false, false, nil},
+			{false, true, nil},
+		}

-	n, conf := NodeSuite(t)
-	mempool := getMempool(t, n)
-	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+		for _, c := range GetClients(t, n, conf) {
+			t.Run(fmt.Sprintf("%T", c), func(t *testing.T) {
+				for j, tc := range cases {
+					t.Run(fmt.Sprintf("Case%d", j), func(t *testing.T) {
+						// now we query for the tx.
+						// since there's only one tx, we know index=0.
+						ptx, err := c.Tx(ctx, tc.hash, tc.prove)

-	require.NoError(t, err)
+						if !tc.valid {
+							require.NotNil(t, err)
+						} else {
+							require.Nil(t, err, "%+v", err)
+							assert.EqualValues(t, txHeight, ptx.Height)
+							assert.EqualValues(t, tx, ptx.Tx)
+							assert.Zero(t, ptx.Index)
+							assert.True(t, ptx.TxResult.IsOK())
+							assert.EqualValues(t, txHash, ptx.Hash)

-	// wait for tx to arrive in mempoool.
- select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) - require.NoError(t, err) - - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - mempool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - - n, conf := NodeSuite(t) - ch := make(chan *abci.Response, 1) - mempool := getMempool(t, n) - - err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) - require.NoError(t, err) - - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - mempoolSize := mempool.Size() - for i, c := range GetClients(t, n, conf) { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) - } - - mempool.Flush() -} - -func TestCheckTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - mempool := getMempool(t, n) - - for _, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - - res, err := c.CheckTx(ctx, tx) - require.NoError(t, err) - assert.Equal(t, abci.CodeTypeOK, res.Code) - - assert.Equal(t, 0, mempool.Size(), "mempool must be empty") - } -} - -func TestTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // first we broadcast a tx - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - prove bool - hash []byte - }{ - // only valid if correct hash provided - {true, false, txHash}, - {true, true, txHash}, - {false, false, anotherTxHash}, - {false, true, anotherTxHash}, - {false, false, nil}, - {false, true, nil}, - } - - for i, c := range GetClients(t, n, conf) { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) - - // now we query for the tx. - // since there's only one tx, we know index=0. 
- ptx, err := c.Tx(ctx, tc.hash, tc.prove) - - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + } + } + }) } - } + }) } - } -} + }) + t.Run("TxSearchWithTimeout", func(t *testing.T) { + timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) -func TestTxSearchWithTimeout(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) - - _, _, tx := MakeTxKV() - _, err := timeoutClient.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - - // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") -} - -func TestTxSearch(t *testing.T) { - n, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - - // first we broadcast a few txs - for i := 0; i < 10; i++ { _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(context.Background(), tx) + _, err := timeoutClient.BroadcastTxCommit(ctx, tx) require.NoError(t, err) - } - - // since we're not using an isolated test server, we'll have lingering transactions - // from other tests as well - result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") - require.NoError(t, err) - txCount := len(result.Txs) - - // pick out the last tx to have something to search for in tests - find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() - - for i, c := range GetClients(t, n, conf) { - t.Logf("client %d", i) - - // now we query for the tx. 
- result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - require.Equal(t, find.Hash, result.Txs[0].Hash) - - ptx := result.Txs[0] - assert.EqualValues(t, find.Height, ptx.Height) - assert.EqualValues(t, find.Tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, find.Hash, ptx.Hash) - - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) - } - - // query by height - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - - // query for non existing tx - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + }) + t.Run("TxSearch", func(t *testing.T) { + t.Skip("Test Asserts Non-Deterministic Results") + c := getHTTPClient(t, conf) - // query using an index key - result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using an noindex key - result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch(context.Background(), - "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query a non existing tx with page 1 and txsPerPage 1 - perPage := 1 - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) - - // check sorting - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - // check pagination - perPage = 3 - var ( - seen = map[int64]bool{} - maxHeight int64 - pages = int(math.Ceil(float64(txCount) / float64(perPage))) - ) - - for page := 1; page <= pages; page++ { - page := page - result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") + // first we broadcast a few txs + for i := 0; i < 10; i++ { + _, _, tx := MakeTxKV() + _, err := 
c.BroadcastTxSync(ctx, tx)
 			require.NoError(t, err)
-			if page < pages {
-				require.Len(t, result.Txs, perPage)
-			} else {
-				require.LessOrEqual(t, len(result.Txs), perPage)
-			}
-			require.Equal(t, txCount, result.TotalCount)
-			for _, tx := range result.Txs {
-				require.False(t, seen[tx.Height],
-					"Found duplicate height %v in page %v", tx.Height, page)
-				require.Greater(t, tx.Height, maxHeight,
-					"Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page)
-				seen[tx.Height] = true
-				maxHeight = tx.Height
-			}
 		}
-		require.Len(t, seen, txCount)
-	}
-}

-func TestBatchedJSONRPCCalls(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+		// since we're not using an isolated test server, we'll have lingering transactions
+		// from other tests as well
+		result, err := c.TxSearch(ctx, "tx.height >= 0", true, nil, nil, "asc")
+		require.NoError(t, err)
+		txCount := len(result.Txs)

-	_, conf := NodeSuite(t)
-	c := getHTTPClient(t, conf)
-	testBatchedJSONRPCCalls(ctx, t, c)
+		// pick out the last tx to have something to search for in tests
+		find := result.Txs[len(result.Txs)-1]
+		anotherTxHash := types.Tx("a different tx").Hash()
+
+		for _, c := range GetClients(t, n, conf) {
+			t.Run(fmt.Sprintf("%T", c), func(t *testing.T) {
+
+				// now we query for the tx.
+				result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Len(t, result.Txs, 1)
+				require.Equal(t, find.Hash, result.Txs[0].Hash)
+
+				ptx := result.Txs[0]
+				assert.EqualValues(t, find.Height, ptx.Height)
+				assert.EqualValues(t, find.Tx, ptx.Tx)
+				assert.Zero(t, ptx.Index)
+				assert.True(t, ptx.TxResult.IsOK())
+				assert.EqualValues(t, find.Hash, ptx.Hash)
+
+				// time to verify the proof
+				if assert.EqualValues(t, find.Tx, ptx.Proof.Data) {
+					assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash))
+				}
+
+				// query by height
+				result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Len(t, result.Txs, 1)
+
+				// query for a non-existent tx
+				result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Len(t, result.Txs, 0)
+
+				// query using a compositeKey (see kvstore application)
+				result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
+
+				// query using an index key
+				result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
+
+				// query using a noindex key
+				result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Equal(t, len(result.Txs), 0, "expected no transactions")
+
+				// query using a compositeKey (see kvstore application) and height
+				result, err = c.TxSearch(ctx,
+					"app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc")
+				require.Nil(t, err)
+				require.Greater(t, len(result.Txs), 0, "expected a lot of transactions")
+
+				// query a non-existent tx with page 1 and txsPerPage 1
+				perPage := 1
+				result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc")
+				require.Nil(t, err)
+				require.Len(t, result.Txs, 0)
+
+				// check sorting
+				result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil,
nil, "asc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + // check pagination + perPage = 3 + var ( + seen = map[int64]bool{} + maxHeight int64 + pages = int(math.Ceil(float64(txCount) / float64(perPage))) + ) + + for page := 1; page <= pages; page++ { + page := page + result, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc") + require.NoError(t, err) + if page < pages { + require.Len(t, result.Txs, perPage) + } else { + require.LessOrEqual(t, len(result.Txs), perPage) + } + require.Equal(t, txCount, result.TotalCount) + for _, tx := range result.Txs { + require.False(t, seen[tx.Height], + "Found duplicate height %v in page %v", tx.Height, page) + require.Greater(t, tx.Height, maxHeight, + "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) + seen[tx.Height] = true + maxHeight = tx.Height + } + } + require.Len(t, seen, txCount) + }) + } + }) } func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) { @@ -781,10 +850,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Len(t, bresults, 2) require.Equal(t, 0, batch.Count()) - bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit) + bresult1, ok := bresults[0].(*coretypes.ResultBroadcastTxCommit) require.True(t, ok) require.Equal(t, *bresult1, *r1) - bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit) + bresult2, ok := bresults[1].(*coretypes.ResultBroadcastTxCommit) require.True(t, ok) require.Equal(t, *bresult2, *r2) apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1 @@ -802,10 +871,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Len(t, qresults, 2) require.Equal(t, 0, batch.Count()) - qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery) + qresult1, ok := qresults[0].(*coretypes.ResultABCIQuery) require.True(t, ok) require.Equal(t, *qresult1, *q1) - qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery) + qresult2, ok := qresults[1].(*coretypes.ResultABCIQuery) require.True(t, ok) require.Equal(t, *qresult2, *q2) @@ -814,60 +883,3 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, qresult1.Response.Value, v1) require.Equal(t, qresult2.Response.Value, v2) } - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(ctx, tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(ctx, tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyRequestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := 
NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - _, err := batch.Send(ctx) - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyRequestBatch(t *testing.T) { - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func TestConcurrentJSONRPCBatching(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - var wg sync.WaitGroup - c := getHTTPClient(t, conf) - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - testBatchedJSONRPCCalls(ctx, t, c) - }() - } - wg.Wait() -} diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go deleted file mode 100644 index 690b0a295..000000000 --- a/rpc/core/mempool.go +++ /dev/null @@ -1,180 +0,0 @@ -package core - -import ( - "context" - "errors" - "fmt" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - mempl "github.com/tendermint/tendermint/internal/mempool" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" -) - -//----------------------------------------------------------------------------- -// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) - -// BroadcastTxAsync returns right away, with no response. Does not wait for -// CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempl.TxInfo{}) - if err != nil { - return nil, err - } - - return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil -} - -// BroadcastTxSync returns with the response from CheckTx. Does not wait for -// DeliverTx result. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - resCh := make(chan *abci.Response, 1) - err := env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { resCh <- res }, - mempl.TxInfo{}, - ) - if err != nil { - return nil, err - } - - res := <-resCh - r := res.GetCheckTx() - - return &ctypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Hash: tx.Hash(), - }, nil -} - -// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit -func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - subscriber := ctx.RemoteAddr() - - if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) - } else if env.EventBus.NumClientSubscriptions(subscriber) >= env.Config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) - } - - // Subscribe to tx being committed in block. 
- subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) - defer cancel() - q := types.EventQueryTxFor(tx) - deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) - if err != nil { - err = fmt.Errorf("failed to subscribe to tx: %w", err) - env.Logger.Error("Error on broadcast_tx_commit", "err", err) - return nil, err - } - defer func() { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: q} - if err := env.EventBus.Unsubscribe(context.Background(), args); err != nil { - env.Logger.Error("Error unsubscribing from eventBus", "err", err) - } - }() - - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan *abci.Response, 1) - err = env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { checkTxResCh <- res }, - mempl.TxInfo{}, - ) - if err != nil { - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) - } - - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.GetCheckTx() - - if checkTxRes.Code != abci.CodeTypeOK { - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil - } - - // Wait for the tx to be included in a block or timeout. - select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. - deliverTxRes := msg.Data().(types.EventDataTx) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: deliverTxRes.Result, - Hash: tx.Hash(), - Height: deliverTxRes.Height, - }, nil - case <-deliverTxSub.Canceled(): - var reason string - if deliverTxSub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = deliverTxSub.Err().Error() - } - err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason) - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err - case <-time.After(env.Config.TimeoutBroadcastTxCommit): - err = errors.New("timed out waiting for tx to be included in a block") - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err - } -} - -// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) -// including their number. -// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit := env.validatePerPage(limitPtr) - - txs := env.Mempool.ReapMaxTxs(limit) - return &ctypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil -} - -// NumUnconfirmedTxs gets number of unconfirmed transactions. -// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return &ctypes.ResultUnconfirmedTxs{ - Count: env.Mempool.Size(), - Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes()}, nil -} - -// CheckTx checks the transaction without executing it. The transaction won't -// be added to the mempool either. 
-// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) - if err != nil { - return nil, err - } - return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil -} diff --git a/rpc/core/net.go b/rpc/core/net.go deleted file mode 100644 index 5b1672e26..000000000 --- a/rpc/core/net.go +++ /dev/null @@ -1,162 +0,0 @@ -package core - -import ( - "errors" - "fmt" - "strings" - - "github.com/tendermint/tendermint/internal/p2p" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -// NetInfo returns network info. -// More: https://docs.tendermint.com/master/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - var peers []ctypes.Peer - - switch { - case env.P2PPeers != nil: - peersList := env.P2PPeers.Peers().List() - peers = make([]ctypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, ctypes.Peer{ - ID: peer.ID(), - URL: peer.SocketAddr().String(), - }) - } - case env.PeerManager != nil: - peerList := env.PeerManager.Peers() - for _, peer := range peerList { - addrs := env.PeerManager.Addresses(peer) - if len(addrs) == 0 { - continue - } - - peers = append(peers, ctypes.Peer{ - ID: peer, - URL: addrs[0].String(), - }) - } - default: - return nil, errors.New("peer management system does not support NetInfo responses") - } - - return &ctypes.ResultNetInfo{ - Listening: env.P2PTransport.IsListening(), - Listeners: env.P2PTransport.Listeners(), - NPeers: len(peers), - Peers: peers, - }, nil -} - -// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(seeds) == 0 { - return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest) - } - env.Logger.Info("DialSeeds", "seeds", seeds) - if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { - return &ctypes.ResultDialSeeds{}, err - } - return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil -} - -// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), -// optionally making them persistent. 
-func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, - peers []string, - persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { - - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(peers) == 0 { - return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest) - } - - ids, err := getIDs(peers) - if err != nil { - return &ctypes.ResultDialPeers{}, err - } - - env.Logger.Info("DialPeers", "peers", peers, "persistent", - persistent, "unconditional", unconditional, "private", private) - - if persistent { - if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if private { - if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if unconditional { - if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if err := env.P2PPeers.DialPeersAsync(peers); err != nil { - return &ctypes.ResultDialPeers{}, err - } - - return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil -} - -// Genesis returns genesis file. -// More: https://docs.tendermint.com/master/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - if len(env.genChunks) > 1 { - return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") - } - - return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil -} - -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { - if env.genChunks == nil { - return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") - } - - if len(env.genChunks) == 0 { - return nil, fmt.Errorf("service configuration error, there are no chunks") - } - - id := int(chunk) - - if id > len(env.genChunks)-1 { - return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id) - } - - return &ctypes.ResultGenesisChunk{ - TotalChunks: len(env.genChunks), - ChunkNumber: id, - Data: env.genChunks[id], - }, nil -} - -func getIDs(peers []string) ([]string, error) { - ids := make([]string, 0, len(peers)) - - for _, peer := range peers { - - spl := strings.Split(peer, "@") - if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} - } - ids = append(ids, spl[0]) - - } - return ids, nil -} diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go deleted file mode 100644 index 821cdb663..000000000 --- a/rpc/core/net_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package core - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - seeds []string - isErr bool - }{ - {[]string{}, true}, - 
{[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} - -func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - sw.SetAddrBook(&p2p.AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{}), - PrivateAddrs: make(map[string]struct{}), - }) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - peers []string - persistence, unconditional, private bool - isErr bool - }{ - {[]string{}, false, false, false, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, - {[]string{"127.0.0.1:41198"}, true, true, false, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} diff --git a/rpc/core/types/responses.go b/rpc/coretypes/responses.go similarity index 94% rename from rpc/core/types/responses.go rename to rpc/coretypes/responses.go index caa9b8732..ecb058312 100644 --- a/rpc/core/types/responses.go +++ b/rpc/coretypes/responses.go @@ -100,6 +100,14 @@ type SyncInfo struct { TotalSyncedTime time.Duration `json:"total_synced_time"` RemainingTime time.Duration `json:"remaining_time"` + + TotalSnapshots int64 `json:"total_snapshots"` + ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time"` + SnapshotHeight int64 `json:"snapshot_height"` + SnapshotChunksCount int64 `json:"snapshot_chunks_count"` + SnapshotChunksTotal int64 `json:"snapshot_chunks_total"` + BackFilledBlocks int64 `json:"backfilled_blocks"` + BackFillBlocksTotal int64 `json:"backfill_blocks_total"` } // Info about the node's validator diff --git a/rpc/core/types/responses_test.go b/rpc/coretypes/responses_test.go similarity index 100% rename from rpc/core/types/responses_test.go rename to rpc/coretypes/responses_test.go diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go deleted file mode 100644 index 27f8c97e4..000000000 --- a/rpc/grpc/api.go +++ /dev/null @@ -1,41 +0,0 @@ -package coregrpc - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - core "github.com/tendermint/tendermint/rpc/core" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -type broadcastAPI struct { - env *core.Environment -} - -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - // kvstore so we can check if the server is up - return &ResponsePing{}, nil -} - -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. 
-func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - // NOTE: there's no way to get client's remote address - // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go - res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) - if err != nil { - return nil, err - } - - return &ResponseBroadcastTx{ - CheckTx: &abci.ResponseCheckTx{ - Code: res.CheckTx.Code, - Data: res.CheckTx.Data, - Log: res.CheckTx.Log, - }, - DeliverTx: &abci.ResponseDeliverTx{ - Code: res.DeliverTx.Code, - Data: res.DeliverTx.Data, - Log: res.DeliverTx.Log, - }, - }, nil -} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go deleted file mode 100644 index 2fb0abb67..000000000 --- a/rpc/grpc/client_server.go +++ /dev/null @@ -1,40 +0,0 @@ -package coregrpc - -import ( - "context" - "net" - - "google.golang.org/grpc" - - tmnet "github.com/tendermint/tendermint/libs/net" - "github.com/tendermint/tendermint/rpc/core" -) - -// Config is an gRPC server configuration. -type Config struct { - MaxOpenConnections int -} - -// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given -// net.Listener. -// NOTE: This function blocks - you may want to call it in a go-routine. -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36 -func StartGRPCServer(env *core.Environment, ln net.Listener) error { - grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) - return grpcServer.Serve(ln) -} - -// StartGRPCClient dials the gRPC server using protoAddr and returns a new -// BroadcastAPIClient. -func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) - if err != nil { - panic(err) - } - return NewBroadcastAPIClient(conn) -} - -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { - return tmnet.Connect(addr) -} diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go deleted file mode 100644 index 45deb6b76..000000000 --- a/rpc/grpc/grpc_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package coregrpc_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/service" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -func NodeSuite(t *testing.T) (service.Service, *config.Config) { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - - conf := rpctest.CreateConfig(t.Name()) - - // start a tendermint node in the background to test against - app := kvstore.NewApplication() - - node, closer, err := rpctest.StartTendermint(ctx, conf, app) - require.NoError(t, err) - t.Cleanup(func() { - _ = closer(ctx) - cancel() - }) - return node, conf -} - -func TestBroadcastTx(t *testing.T) { - _, conf := NodeSuite(t) - - res, err := rpctest.GetGRPCClient(conf).BroadcastTx( - context.Background(), - &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}, - ) - require.NoError(t, err) - require.EqualValues(t, 0, res.CheckTx.Code) - require.EqualValues(t, 0, res.DeliverTx.Code) -} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go deleted file mode 100644 index b9cbee03f..000000000 --- a/rpc/grpc/types.pb.go +++ /dev/null @@ -1,924 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/rpc/grpc/types.proto - -package coregrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RequestPing struct { -} - -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{0} -} -func (m *RequestPing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestPing) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPing.Merge(m, src) -} -func (m *RequestPing) XXX_Size() int { - return m.Size() -} -func (m *RequestPing) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPing.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestPing proto.InternalMessageInfo - -type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{1} -} -func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBroadcastTx.Merge(m, src) -} -func (m *RequestBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *RequestBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo - -func (m *RequestBroadcastTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type ResponsePing struct { -} - -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{2} -} -func (m *ResponsePing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponsePing) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePing.Merge(m, src) -} -func (m *ResponsePing) XXX_Size() int { - return m.Size() -} -func (m *ResponsePing) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePing.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponsePing proto.InternalMessageInfo - -type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` -} - -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{3} -} -func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) -} -func (m *ResponseBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo - -func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { - if m != nil { - return m.CheckTx - } - return nil -} - -func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { - if m != nil { - return m.DeliverTx - } - return nil -} - -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") -} - -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, - 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, - 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, - 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, - 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, - 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, - 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 
0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, - 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, - 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, - 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, - 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, - 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, - 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, - 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, - 0x5e, 0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, - 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) -} - -type broadcastAPIClient struct { - cc *grpc.ClientConn -} - -func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} -} - -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) -} - -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
-type UnimplementedBroadcastAPIServer struct { -} - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") -} -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") -} - -func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) -} - -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) - } - return interceptor(ctx, in, info, handler) -} - -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) - } - return interceptor(ctx, in, info, handler) -} - -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.rpc.grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/rpc/grpc/types.proto", -} - -func (m *RequestPing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResponsePing) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RequestPing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *RequestBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *ResponsePing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ResponseBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestPing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponsePing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckTx == nil { - m.CheckTx = &types.ResponseCheckTx{} - } - if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeliverTx == nil { - m.DeliverTx = &types.ResponseDeliverTx{} - } - if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go index 42941ea68..f69926cb7 100644 --- a/rpc/jsonrpc/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -6,18 +6,18 @@ import ( "fmt" tmjson "github.com/tendermint/tendermint/libs/json" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes 
"github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func unmarshalResponseBytes( responseBytes []byte, - expectedID types.JSONRPCIntID, + expectedID rpctypes.JSONRPCIntID, result interface{}, ) (interface{}, error) { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. - response := &types.RPCResponse{} + response := &rpctypes.RPCResponse{} if err := json.Unmarshal(responseBytes, response); err != nil { return nil, fmt.Errorf("error unmarshaling: %w", err) } @@ -40,12 +40,12 @@ func unmarshalResponseBytes( func unmarshalResponseBytesArray( responseBytes []byte, - expectedIDs []types.JSONRPCIntID, + expectedIDs []rpctypes.JSONRPCIntID, results []interface{}, ) ([]interface{}, error) { var ( - responses []types.RPCResponse + responses []rpctypes.RPCResponse ) if err := json.Unmarshal(responseBytes, &responses); err != nil { @@ -64,10 +64,10 @@ func unmarshalResponseBytesArray( } // Intersect IDs from responses with expectedIDs. - ids := make([]types.JSONRPCIntID, len(responses)) + ids := make([]rpctypes.JSONRPCIntID, len(responses)) var ok bool for i, resp := range responses { - ids[i], ok = resp.ID.(types.JSONRPCIntID) + ids[i], ok = resp.ID.(rpctypes.JSONRPCIntID) if !ok { return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) } @@ -85,8 +85,8 @@ func unmarshalResponseBytesArray( return results, nil } -func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { - m := make(map[types.JSONRPCIntID]bool, len(expectedIDs)) +func validateResponseIDs(ids, expectedIDs []rpctypes.JSONRPCIntID) error { + m := make(map[rpctypes.JSONRPCIntID]bool, len(expectedIDs)) for _, expectedID := range expectedIDs { m[expectedID] = true } @@ -104,11 +104,11 @@ func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { // From the JSON-RPC 2.0 spec: // id: It MUST be the same as the value of the id member in the Request Object. -func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) error { +func validateAndVerifyID(res *rpctypes.RPCResponse, expectedID rpctypes.JSONRPCIntID) error { if err := validateResponseID(res.ID); err != nil { return err } - if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type + if expectedID != res.ID.(rpctypes.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) } return nil @@ -118,7 +118,7 @@ func validateResponseID(id interface{}) error { if id == nil { return errors.New("no ID") } - _, ok := id.(types.JSONRPCIntID) + _, ok := id.(rpctypes.JSONRPCIntID) if !ok { return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) } diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 71c00137b..03fc19be4 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -4,8 +4,9 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -13,7 +14,7 @@ import ( "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( @@ -155,7 +156,7 @@ func New(remote string) (*Client, error) { // panics when client is nil. 
func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } parsedURL, err := newParsedURL(remote) @@ -189,7 +190,7 @@ func (c *Client) Call( ) (interface{}, error) { id := c.nextRequestID() - request, err := types.MapToRequest(id, method, params) + request, err := rpctypes.MapToRequest(id, method, params) if err != nil { return nil, fmt.Errorf("failed to encode params: %w", err) } @@ -218,7 +219,7 @@ func (c *Client) Call( defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("failed to read response body: %w", err) } @@ -235,7 +236,7 @@ func (c *Client) NewRequestBatch() *RequestBatch { } func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) { - reqs := make([]types.RPCRequest, 0, len(requests)) + reqs := make([]rpctypes.RPCRequest, 0, len(requests)) results := make([]interface{}, 0, len(requests)) for _, req := range requests { reqs = append(reqs, req.request) @@ -266,26 +267,26 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } // collect ids to check responses IDs in unmarshalResponseBytesArray - ids := make([]types.JSONRPCIntID, len(requests)) + ids := make([]rpctypes.JSONRPCIntID, len(requests)) for i, req := range requests { - ids[i] = req.request.ID.(types.JSONRPCIntID) + ids[i] = req.request.ID.(rpctypes.JSONRPCIntID) } return unmarshalResponseBytesArray(responseBytes, ids, results) } -func (c *Client) nextRequestID() types.JSONRPCIntID { +func (c *Client) nextRequestID() rpctypes.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ c.mtx.Unlock() - return types.JSONRPCIntID(id) + return rpctypes.JSONRPCIntID(id) } //------------------------------------------------------------------------------------ @@ -293,7 +294,7 @@ func (c *Client) nextRequestID() types.JSONRPCIntID { // jsonRPCBufferedRequest encapsulates a single buffered request, as well as its // anticipated response structure. type jsonRPCBufferedRequest struct { - request types.RPCRequest + request rpctypes.RPCRequest result interface{} // The result will be deserialized into this object. } @@ -354,7 +355,7 @@ func (b *RequestBatch) Call( result interface{}, ) (interface{}, error) { id := b.client.nextRequestID() - request, err := types.MapToRequest(id, method, params) + request, err := rpctypes.MapToRequest(id, method, params) if err != nil { return nil, err } diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 5a03af512..fd433d458 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -1,7 +1,7 @@ package client import ( - "io/ioutil" + "io" "log" "net/http" "net/http/httptest" @@ -21,7 +21,7 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) { defer tsTLS.Close() // This silences a TLS handshake error, caused by the dialer just immediately // disconnecting, which we can just ignore. 
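The NewWithHTTPClient change above turns a panic into an ordinary error, so construction sites gain an error check. A minimal sketch of the new contract (the remote address is illustrative, and http.DefaultClient stands in for a properly configured client):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/tendermint/tendermint/rpc/jsonrpc/client"
)

func main() {
	// A nil *http.Client is now reported as an error rather than a panic.
	if _, err := client.NewWithHTTPClient("tcp://127.0.0.1:26657", nil); err != nil {
		fmt.Println("constructor rejected nil client:", err)
	}

	// The happy path is unchanged apart from the error check; construction
	// can still fail if the remote address does not parse.
	c, err := client.NewWithHTTPClient("tcp://127.0.0.1:26657", http.DefaultClient)
	if err != nil {
		panic(err)
	}
	_ = c
}
```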
- tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0) for _, testURL := range []string{ts.URL, tsTLS.URL} { u, err := newParsedURL(testURL) diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go index 3f376ddb0..061622942 100644 --- a/rpc/jsonrpc/client/http_uri_client.go +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -3,16 +3,16 @@ package client import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "strings" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( // URIClientRequestID in a request ID used by URIClient - URIClientRequestID = types.JSONRPCIntID(-1) + URIClientRequestID = rpctypes.JSONRPCIntID(-1) ) // URIClient is a JSON-RPC client, which sends POST form HTTP requests to the @@ -76,7 +76,7 @@ func (c *URIClient) Call(ctx context.Context, method string, } defer resp.Body.Close() - responseBytes, err := ioutil.ReadAll(resp.Body) + responseBytes, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go index 228bbb460..26f24d255 100644 --- a/rpc/jsonrpc/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -1,3 +1,4 @@ +//go:build release // +build release // The code in here is comprehensive as an integration diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index f47186429..8d8f9d18d 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -14,8 +14,8 @@ import ( metrics "github.com/rcrowley/go-metrics" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + tmclient "github.com/tendermint/tendermint/rpc/client" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // WSOptions for WSClient. @@ -41,6 +41,7 @@ func DefaultWSOptions() WSOptions { // // WSClient is safe for concurrent use by multiple goroutines. type WSClient struct { // nolint: maligned + *tmclient.RunState conn *websocket.Conn Address string // IP:PORT or /path/to/socket @@ -49,16 +50,16 @@ type WSClient struct { // nolint: maligned // Single user facing channel to read RPCResponses from, closed only when the // client is being stopped. - ResponsesCh chan types.RPCResponse + ResponsesCh chan rpctypes.RPCResponse // Callback, which will be called each time after successful reconnect. onReconnect func() // internal channels - send chan types.RPCRequest // user requests - backlog chan types.RPCRequest // stores a single user request received during a conn failure - reconnectAfter chan error // reconnect requests - readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine + send chan rpctypes.RPCRequest // user requests + backlog chan rpctypes.RPCRequest // stores a single user request received during a conn failure + reconnectAfter chan error // reconnect requests + readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine // Maximum reconnect attempts (0 or greater; default: 25). maxReconnectAttempts uint @@ -83,8 +84,6 @@ type WSClient struct { // nolint: maligned // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration - service.BaseService - // Time between sending a ping and receiving a pong. 
See // https://godoc.org/github.com/rcrowley/go-metrics#Timer. PingPongLatencyTimer metrics.Timer @@ -114,6 +113,7 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e } c := &WSClient{ + RunState: tmclient.NewRunState("WSClient", nil), Address: parsedURL.GetTrimmedHostWithPath(), Dialer: dialFn, Endpoint: endpoint, @@ -127,7 +127,6 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e // sentIDs: make(map[types.JSONRPCIntID]bool), } - c.BaseService = *service.NewBaseService(nil, "WSClient", c) return c, nil } @@ -143,23 +142,25 @@ func (c *WSClient) String() string { return fmt.Sprintf("WSClient{%s (%s)}", c.Address, c.Endpoint) } -// OnStart implements service.Service by dialing a server and creating read and -// write routines. -func (c *WSClient) OnStart() error { +// Start dials the specified service address and starts the I/O routines. +func (c *WSClient) Start() error { + if err := c.RunState.Start(); err != nil { + return err + } err := c.dial() if err != nil { return err } - c.ResponsesCh = make(chan types.RPCResponse) + c.ResponsesCh = make(chan rpctypes.RPCResponse) - c.send = make(chan types.RPCRequest) + c.send = make(chan rpctypes.RPCRequest) // 1 additional error may come from the read/write // goroutine depending on which failed first. c.reconnectAfter = make(chan error, 1) // capacity for 1 request. a user won't be able to send more because the send // channel is unbuffered. - c.backlog = make(chan types.RPCRequest, 1) + c.backlog = make(chan rpctypes.RPCRequest, 1) c.startReadWriteRoutines() go c.reconnectRoutine() @@ -167,10 +168,9 @@ func (c *WSClient) OnStart() error { return nil } -// Stop overrides service.Service#Stop. There is no other way to wait until Quit -// channel is closed. +// Stop shuts down the client. func (c *WSClient) Stop() error { - if err := c.BaseService.Stop(); err != nil { + if err := c.RunState.Stop(); err != nil { return err } // only close user-facing channels when we can't write to them @@ -195,7 +195,7 @@ func (c *WSClient) IsActive() bool { // Send the given RPC request to the server. Results will be available on // ResponsesCh, errors, if any, on ErrorsCh. Will block until send succeeds or // ctx.Done is closed. -func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { +func (c *WSClient) Send(ctx context.Context, request rpctypes.RPCRequest) error { select { case c.send <- request: c.Logger.Info("sent a request", "req", request) @@ -210,7 +210,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { // Call enqueues a call request onto the Send queue. Requests are JSON encoded. func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error { - request, err := types.MapToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.MapToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -220,7 +220,7 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in // CallWithArrayParams enqueues a call request onto the Send queue. Params are // in a form of array (e.g. []interface{}{"abcd"}). Requests are JSON encoded. 
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error { - request, err := types.ArrayToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.ArrayToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -229,12 +229,12 @@ func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, param // Private methods -func (c *WSClient) nextRequestID() types.JSONRPCIntID { +func (c *WSClient) nextRequestID() rpctypes.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ c.mtx.Unlock() - return types.JSONRPCIntID(id) + return rpctypes.JSONRPCIntID(id) } func (c *WSClient) dial() error { @@ -462,7 +462,7 @@ func (c *WSClient) readRoutine() { return } - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(data, &response) if err != nil { c.Logger.Error("failed to parse response", "err", err, "data", string(data)) diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index bb8c149f6..208313e79 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -14,7 +14,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var wsCallTimeout = 5 * time.Second @@ -41,7 +41,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - var req types.RPCRequest + var req rpctypes.RPCRequest err = json.Unmarshal(in, &req) if err != nil { panic(err) @@ -56,7 +56,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mtx.RUnlock() res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res, ID: req.ID}) + emptyRespBytes, _ := json.Marshal(rpctypes.RPCResponse{Result: res, ID: req.ID}) if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { return } diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index b5e422280..5013590b6 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -18,9 +18,9 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - client "github.com/tendermint/tendermint/rpc/jsonrpc/client" - server "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/jsonrpc/client" + "github.com/tendermint/tendermint/rpc/jsonrpc/server" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Client and Server should work over tcp or unix sockets @@ -64,23 +64,23 @@ var Routes = map[string]*server.RPCFunc{ "echo_int": server.NewRPCFunc(EchoIntResult, "arg", false), } -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoWSResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(ctx *rpctypes.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(ctx *rpctypes.Context, v []byte) 
(*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(ctx *rpctypes.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index bbb32b407..dabeee074 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -5,15 +5,15 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "reflect" "sort" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + JSON handler @@ -21,9 +21,9 @@ import ( // jsonrpc calls grab the given method's function info and runs reflect.Call func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { - res := types.RPCInvalidRequestError(nil, + res := rpctypes.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -41,20 +41,20 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han // first try to unmarshal the incoming request as an array of RPC requests var ( - requests []types.RPCRequest - responses []types.RPCResponse + requests []rpctypes.RPCRequest + responses []rpctypes.RPCResponse ) if err := json.Unmarshal(b, &requests); err != nil { // next, try to unmarshal as a single request - var request types.RPCRequest + var request rpctypes.RPCRequest if err := json.Unmarshal(b, &request); err != nil { - res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) + res := rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } return } - requests = []types.RPCRequest{request} + requests = []rpctypes.RPCRequest{request} } // Set the default response cache to true unless @@ -77,25 +77,25 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if len(r.URL.Path) > 1 { responses = append( responses, - types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + rpctypes.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), ) c = false continue } rpcFunc, ok := funcMap[request.Method] if !ok || rpcFunc.ws { - responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + responses = append(responses, rpctypes.RPCMethodNotFoundError(request.ID)) c = false continue } - ctx := &types.Context{JSONReq: &request, HTTPReq: r} + ctx := &rpctypes.Context{JSONReq: &request, HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { responses = append( responses, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json 
params to arguments: %w", err)), ) c = false continue @@ -114,22 +114,22 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han switch e := err.(type) { // if no error then return a success response case nil: - responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) + responses = append(responses, rpctypes.NewRPCSuccessResponse(request.ID, result)) // if this already of type RPC error then forward that error - case *types.RPCError: - responses = append(responses, types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) + case *rpctypes.RPCError: + responses = append(responses, rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) c = false default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - responses = append(responses, types.RPCInvalidRequestError(request.ID, err)) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + responses = append(responses, rpctypes.RPCInvalidRequestError(request.ID, err)) c = false // lastly default all remaining errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - responses = append(responses, types.RPCInternalError(request.ID, err)) + responses = append(responses, rpctypes.RPCInternalError(request.ID, err)) c = false } } @@ -277,7 +277,7 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st w.Write(buf.Bytes()) // nolint: errcheck } -func hasDefaultHeight(r types.RPCRequest, h []reflect.Value) bool { +func hasDefaultHeight(r rpctypes.RPCRequest, h []reflect.Value) bool { switch r.Method { case "block", "block_results", "commit", "consensus_params", "validators": return len(h) < 2 || h[1].IsZero() diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 529f7619c..94c241ca0 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -2,7 +2,7 @@ package server import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), - "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", true), + "c": NewRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), + "block": NewRPCFunc(func(ctx *rpctypes.Context, h int) (string, error) { return "block", nil }, "height", true), } mux := http.NewServeMux() logger := log.NewNopLogger() @@ -40,21 +40,21 @@ func TestRPCParams(t *testing.T) { expectedID interface{} }{ // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", 
rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, // id not captured in JSON parsing failures {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", rpctypes.JSONRPCStringID("0")}, // no ID - notification // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": {}}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", rpctypes.JSONRPCStringID("0")}, } for i, tt := range tests { @@ -65,15 +65,15 @@ func TestRPCParams(t *testing.T) { defer res.Body.Close() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue } - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) @@ -93,12 +93,12 @@ func TestJSONRPCID(t *testing.T) { expectedID interface{} }{ // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, types.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, types.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(-1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("abc")}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(0)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", 
"10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(-1)}, // bad id {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, @@ -112,18 +112,18 @@ func TestJSONRPCID(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue } res.Body.Close() - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) err = json.Unmarshal(blob, recv) assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { @@ -142,7 +142,7 @@ func TestRPCNotification(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") @@ -178,14 +178,14 @@ func TestRPCNotificationInBatch(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue } res.Body.Close() - var responses []types.RPCResponse + var responses []rpctypes.RPCResponse // try to unmarshal an array first err = json.Unmarshal(blob, &responses) if err != nil { @@ -195,14 +195,14 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } else { // we were expecting an error here, so let's unmarshal a single response - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(blob, &response) if err != nil { t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) continue } // have a single-element result - responses = []types.RPCResponse{response} + responses = []rpctypes.RPCResponse{response} } } if tt.expectCount != len(responses) { @@ -210,7 +210,7 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } for _, response := range responses { - assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, response, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) } } } @@ -239,7 +239,7 @@ func TestRPCResponseCache(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") require.Equal(t, "max-age=31536000", res.Header.Get("Cache-control")) - _, err := ioutil.ReadAll(res.Body) + _, err := io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") @@ -254,7 +254,7 @@ func TestRPCResponseCache(t *testing.T) { 
require.True(t, statusOK(res.StatusCode), "should always return 2XX") require.Equal(t, "", res.Header.Get("Cache-control")) - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 549671241..49e1e510e 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -16,7 +16,7 @@ import ( "golang.org/x/net/netutil" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Config is a RPC server configuration. @@ -105,7 +105,7 @@ func ServeTLS( // source: https://www.jsonrpc.org/historical/json-rpc-over-http.html func WriteRPCResponseHTTPError( w http.ResponseWriter, - res types.RPCResponse, + res rpctypes.RPCResponse, ) error { if res.Error == nil { panic("tried to write http error response without RPC error") @@ -134,7 +134,7 @@ func WriteRPCResponseHTTPError( // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. // If the rpc response can be cached, add cache-control to the response header. -func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...types.RPCResponse) error { +func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...rpctypes.RPCResponse) error { var v interface{} if len(res) == 1 { v = res[0] @@ -189,7 +189,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler if e := recover(); e != nil { // If RPCResponse - if res, ok := e.(types.RPCResponse); ok { + if res, ok := e.(rpctypes.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, false, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -208,7 +208,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - res := types.RPCInternalError(types.JSONRPCIntID(-1), err) + res := rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 823719e41..ff2776bb4 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) type sampleResult struct { @@ -101,20 +101,20 @@ func TestServeTLS(t *testing.T) { defer res.Body.Close() assert.Equal(t, http.StatusOK, res.StatusCode) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) require.NoError(t, err) assert.Equal(t, []byte("some body"), body) } func TestWriteRPCResponseHTTP(t *testing.T) { - id := types.JSONRPCIntID(-1) + id := rpctypes.JSONRPCIntID(-1) // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, true, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) 
resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -132,11 +132,11 @@ func TestWriteRPCResponseHTTP(t *testing.T) { w = httptest.NewRecorder() err = WriteRPCResponseHTTP(w, false, - types.NewRPCSuccessResponse(id, &sampleResult{"hello"}), - types.NewRPCSuccessResponse(id, &sampleResult{"world"})) + rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"}), + rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) require.NoError(t, err) resp = w.Result() - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) @@ -162,10 +162,10 @@ func TestWriteRPCResponseHTTP(t *testing.T) { func TestWriteRPCResponseHTTPError(t *testing.T) { w := httptest.NewRecorder() - err := WriteRPCResponseHTTPError(w, types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) + err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index f5a2ecebc..9fb5c1cde 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -5,14 +5,15 @@ import ( "errors" "fmt" "net/http" + "net/http/httputil" "reflect" "regexp" "strings" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + URI handler @@ -22,12 +23,12 @@ var reInt = regexp.MustCompile(`^-?[0-9]+$`) // convert from a function name to the http handler func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { // Always return -1 as there's no ID here. 
- dummyID := types.JSONRPCIntID(-1) // URIClientRequestID + dummyID := rpctypes.JSONRPCIntID(-1) // URIClientRequestID // Exception for websocket endpoints if rpcFunc.ws { return func(w http.ResponseWriter, r *http.Request) { - res := types.RPCMethodNotFoundError(dummyID) + res := rpctypes.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -36,14 +37,14 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit // All other endpoints return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) + logger.Debug("HTTP HANDLER", "req", dumpHTTPRequest(r)) - ctx := &types.Context{HTTPReq: r} + ctx := &rpctypes.Context{HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} fnArgs, err := httpParamsToArgs(rpcFunc, r) if err != nil { - res := types.RPCInvalidParamsError(dummyID, + res := rpctypes.RPCInvalidParamsError(dummyID, fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -60,29 +61,29 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit switch e := err.(type) { // if no error then return a success response case nil: - res := types.NewRPCSuccessResponse(dummyID, result) + res := rpctypes.NewRPCSuccessResponse(dummyID, result) if wErr := WriteRPCResponseHTTP(w, rpcFunc.cache, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } // if this already of type RPC error then forward that error. - case *types.RPCError: - res := types.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + res := rpctypes.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } default: // we need to unwrap the error and parse it accordingly - var res types.RPCResponse + var res rpctypes.RPCResponse switch errors.Unwrap(err) { - case ctypes.ErrZeroOrNegativeHeight, - ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, - ctypes.ErrInvalidRequest: - res = types.RPCInvalidRequestError(dummyID, err) + case coretypes.ErrZeroOrNegativeHeight, + coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, + coretypes.ErrInvalidRequest: + res = rpctypes.RPCInvalidRequestError(dummyID, err) default: // ctypes.ErrHeightNotAvailable, ctypes.ErrHeightExceedsChainHead: - res = types.RPCInternalError(dummyID, err) + res = rpctypes.RPCInternalError(dummyID, err) } if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -232,3 +233,12 @@ func getParam(r *http.Request, param string) string { } return s } + +func dumpHTTPRequest(r *http.Request) string { + d, e := httputil.DumpRequest(r, true) + if e != nil { + return e.Error() + } + + return string(d) +} diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index 86316f8e5..92ea6f2c0 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/bytes" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -134,7 +134,7 @@ func TestParseJSONArray(t *testing.T) { } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *types.Context, 
height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name", false) cases := []struct { @@ -171,7 +171,7 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name", false) cases := []struct { diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index a7b77dbd3..2271d03f8 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -13,9 +13,9 @@ import ( "github.com/gorilla/websocket" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // WebSocket handler @@ -86,8 +86,8 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ }() // register connection - con := newWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) - con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) + logger := wm.logger.With("remote", wsConn.RemoteAddr()) + con := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) wm.logger.Info("New websocket connection", "remote", con.remoteAddr) err = con.Start() // BLOCKING if err != nil { @@ -106,12 +106,12 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // // In case of an error, the connection is stopped. type wsConnection struct { - service.BaseService + *client.RunState remoteAddr string baseConn *websocket.Conn // writeChan is never closed, to allow WriteRPCResponse() to fail. - writeChan chan types.RPCResponse + writeChan chan rpctypes.RPCResponse // chan, which is closed when/if readRoutine errors // used to abort writeRoutine @@ -150,9 +150,11 @@ type wsConnection struct { func newWSConnection( baseConn *websocket.Conn, funcMap map[string]*RPCFunc, + logger log.Logger, options ...func(*wsConnection), ) *wsConnection { wsc := &wsConnection{ + RunState: client.NewRunState("wsConnection", logger), remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, funcMap: funcMap, @@ -166,7 +168,6 @@ func newWSConnection( option(wsc) } wsc.baseConn.SetReadLimit(wsc.readLimit) - wsc.BaseService = *service.NewBaseService(nil, "wsConnection", wsc) return wsc } @@ -218,10 +219,12 @@ func ReadLimit(readLimit int64) func(*wsConnection) { } } -// OnStart implements service.Service by starting the read and write routines. It -// blocks until there's some error. -func (wsc *wsConnection) OnStart() error { - wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) +// Start starts the client service routines and blocks until there is an error. +func (wsc *wsConnection) Start() error { + if err := wsc.RunState.Start(); err != nil { + return err + } + wsc.writeChan = make(chan rpctypes.RPCResponse, wsc.writeChanCapacity) // Read subscriptions/unsubscriptions to events go wsc.readRoutine() @@ -231,16 +234,18 @@ func (wsc *wsConnection) OnStart() error { return nil } -// OnStop implements service.Service by unsubscribing remoteAddr from all -// subscriptions. -func (wsc *wsConnection) OnStop() { +// Stop unsubscribes the remote from all subscriptions. 
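Here wsConnection trades service.BaseService for an embedded *client.RunState, with the logger supplied at construction time rather than attached afterwards. The RunState implementation itself is not part of this diff; an illustrative stand-in (not the repo's code) for the start/stop bookkeeping it provides:

```go
package main

import (
	"fmt"
	"sync"
)

// runState is an illustrative sketch of a run-state guard: Start and Stop
// are safe to call from multiple goroutines and reject double transitions.
type runState struct {
	mu      sync.Mutex
	name    string
	running bool
}

func (rs *runState) Start() error {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	if rs.running {
		return fmt.Errorf("%s is already running", rs.name)
	}
	rs.running = true
	return nil
}

func (rs *runState) Stop() error {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	if !rs.running {
		return fmt.Errorf("%s is not running", rs.name)
	}
	rs.running = false
	return nil
}

func main() {
	rs := &runState{name: "wsConnection"}
	fmt.Println(rs.Start(), rs.Start()) // <nil>, then "already running"
	fmt.Println(rs.Stop(), rs.Stop())   // <nil>, then "not running"
}
```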
+func (wsc *wsConnection) Stop() error { + if err := wsc.RunState.Stop(); err != nil { + return err + } if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } - if wsc.ctx != nil { wsc.cancel() } + return nil } // GetRemoteAddr returns the remote address of the underlying connection. @@ -252,7 +257,7 @@ func (wsc *wsConnection) GetRemoteAddr() string { // WriteRPCResponse pushes a response to the writeChan, and blocks until it is // accepted. // It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { +func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) error { select { case <-wsc.Quit(): return errors.New("connection was stopped") @@ -266,7 +271,7 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCRes // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. // It implements WSRPCConnection. It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponse(resp rpctypes.RPCResponse) bool { select { case <-wsc.Quit(): return false @@ -299,7 +304,7 @@ func (wsc *wsConnection) readRoutine() { err = fmt.Errorf("WSJSONRPC: %v", r) } wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } go wsc.readRoutine() @@ -335,11 +340,11 @@ func (wsc *wsConnection) readRoutine() { } dec := json.NewDecoder(r) - var request types.RPCRequest + var request rpctypes.RPCRequest err = dec.Decode(&request) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { + rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue @@ -358,19 +363,19 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. 
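Because WriteRPCResponse now takes a context, callers can bound how long a stalled connection may block them. A fragment, assuming a live wsConnection `wsc` and a `result` value produced by the handler as in the read loop below:

```go
// Fragment: cap the response write at five seconds.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

resp := rpctypes.NewRPCSuccessResponse(rpctypes.JSONRPCIntID(1), result)
if err := wsc.WriteRPCResponse(ctx, resp); err != nil {
	wsc.Logger.Error("failed to write response", "err", err)
}
```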
rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue } - ctx := &types.Context{JSONReq: &request, WSConn: wsc} + ctx := &rpctypes.Context{JSONReq: &request, WSConn: wsc} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } @@ -384,27 +389,27 @@ func (wsc *wsConnection) readRoutine() { // TODO: Need to encode args/returns to string if we want to log them wsc.Logger.Info("WSJSONRPC", "method", request.Method) - var resp types.RPCResponse + var resp rpctypes.RPCResponse result, err := unreflectResult(returns) switch e := err.(type) { // if no error then return a success response case nil: - resp = types.NewRPCSuccessResponse(request.ID, result) + resp = rpctypes.NewRPCSuccessResponse(request.ID, result) // if this already of type RPC error then forward that error - case *types.RPCError: - resp = types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + resp = rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - resp = types.RPCInvalidRequestError(request.ID, err) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + resp = rpctypes.RPCInvalidRequestError(request.ID, err) // lastly default all remaining errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - resp = types.RPCInternalError(request.ID, err) + resp = rpctypes.RPCInternalError(request.ID, err) } } diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index 42a96d1d3..b691172a4 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestWebsocketManagerHandler(t *testing.T) { @@ -26,8 +26,8 @@ func TestWebsocketManagerHandler(t *testing.T) { } // check basic functionality works - req, err := types.MapToRequest( - types.JSONRPCStringID("TestWebsocketManager"), + req, err := rpctypes.MapToRequest( + rpctypes.JSONRPCStringID("TestWebsocketManager"), "c", map[string]interface{}{"s": "a", "i": 10}, ) @@ -35,7 +35,7 @@ func TestWebsocketManagerHandler(t *testing.T) { err = c.WriteJSON(req) require.NoError(t, err) - var resp types.RPCResponse + var resp rpctypes.RPCResponse err = c.ReadJSON(&resp) require.NoError(t, err) require.Nil(t, resp.Error) @@ -44,7 +44,7 @@ func 
TestWebsocketManagerHandler(t *testing.T) { func newWSServer() *httptest.Server { funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } wm := NewWebsocketManager(funcMap) wm.SetLogger(log.TestingLogger()) diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index d348e1639..64f9de87a 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -3,6 +3,7 @@ package main import ( "fmt" "net/http" + "os" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" @@ -35,10 +36,12 @@ func main() { config := rpcserver.DefaultConfig() listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { - tmos.Exit(err.Error()) + logger.Error("rpc listening", "err", err) + os.Exit(1) } if err = rpcserver.Serve(listener, mux, logger, config); err != nil { - tmos.Exit(err.Error()) + logger.Error("rpc serve", "err", err) + os.Exit(1) } } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index d5a9ffafa..83d85be8f 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -237,6 +237,31 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + + /remove_tx: + get: + summary: Removes a transaction from the mempool. + tags: + - TxKey + operationId: remove_tx + parameters: + - in: query + name: txKey + required: true + schema: + type: string + example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + description: The transaction key + responses: + "200": + description: empty response. + "500": + description: empty error. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /subscribe: get: summary: Subscribe for events via WebSocket. @@ -1396,6 +1421,27 @@ components: remaining_time: type: string example: "0" + total_snapshots: + type: string + example: "10" + chunk_process_avg_time: + type: string + example: "1000000000" + snapshot_height: + type: string + example: "1262196" + snapshot_chunks_count: + type: string + example: "10" + snapshot_chunks_total: + type: string + example: "100" + backfilled_blocks: + type: string + example: "10" + backfill_blocks_total: + type: string + example: "100" ValidatorInfo: type: object properties: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 259450540..90c3b2e49 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -6,15 +6,14 @@ import ( "os" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" + "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/rpc/coretypes" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -24,13 +23,14 @@ type Options struct { suppressStdout bool } -func waitForRPC(ctx context.Context, conf *cfg.Config) { +// waitForRPC connects to the RPC service and blocks until a /status call succeeds. 
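Per the OpenAPI entry above, /remove_tx takes the transaction key as a query parameter and answers an empty 200 on success. A hypothetical client, assuming a node serving RPC on the conventional 127.0.0.1:26657 and reusing the placeholder key from the spec:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	txKey := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" // placeholder from the example above
	u := "http://127.0.0.1:26657/remove_tx?txKey=" + url.QueryEscape(txKey)

	resp, err := http.Get(u)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expect "200 OK" and an empty body
}
```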
+func waitForRPC(ctx context.Context, conf *config.Config) { laddr := conf.RPC.ListenAddress client, err := rpcclient.New(laddr) if err != nil { panic(err) } - result := new(ctypes.ResultStatus) + result := new(coretypes.ResultStatus) for { _, err := client.Call(ctx, "status", map[string]interface{}{}, result) if err == nil { @@ -42,16 +42,6 @@ func waitForRPC(ctx context.Context, conf *cfg.Config) { } } -func waitForGRPC(ctx context.Context, conf *cfg.Config) { - client := GetGRPCClient(conf) - for { - _, err := client.Ping(ctx, &core_grpc.RequestPing{}) - if err == nil { - return - } - } -} - func randPort() int { port, err := tmnet.GetFreePort() if err != nil { @@ -60,35 +50,36 @@ func randPort() int { return port } -func makeAddrs() (string, string, string) { - return fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()) +// makeAddrs constructs local listener addresses for node services. This +// implementation uses random ports so test instances can run concurrently. +func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" + return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) *cfg.Config { - c := cfg.ResetTestRoot(testName) +func CreateConfig(testName string) (*config.Config, error) { + c, err := config.ResetTestRoot(testName) + if err != nil { + return nil, err + } - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr + c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - c.RPC.GRPCListenAddress = grpc - return c -} - -func GetGRPCClient(conf *cfg.Config) core_grpc.BroadcastAPIClient { - grpcAddr := conf.RPC.GRPCListenAddress - return core_grpc.StartGRPCClient(grpcAddr) + return c, nil } type ServiceCloser func(context.Context) error -func StartTendermint(ctx context.Context, - conf *cfg.Config, +func StartTendermint( + ctx context.Context, + conf *config.Config, app abci.Application, - opts ...func(*Options)) (service.Service, ServiceCloser, error) { + opts ...func(*Options), +) (service.Service, ServiceCloser, error) { + ctx, cancel := context.WithCancel(ctx) nodeOpts := &Options{} for _, opt := range opts { @@ -100,30 +91,26 @@ func StartTendermint(ctx context.Context, } else { logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) } - papp := proxy.NewLocalClientCreator(app) - node, err := nm.New(conf, logger, papp, nil) + papp := abciclient.NewLocalCreator(app) + tmNode, err := node.New(ctx, conf, logger, papp, nil) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } - err = node.Start() + err = tmNode.Start(ctx) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } - // wait for rpc waitForRPC(ctx, conf) - waitForGRPC(ctx, conf) if !nodeOpts.suppressStdout { fmt.Println("Tendermint running!") } - return node, func(ctx context.Context) error { - if err := node.Stop(); err != nil { - logger.Error("Error when trying to stop node", "err", err) - } - node.Wait() + return tmNode, func(ctx context.Context) error { + cancel() + tmNode.Wait() 
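Taken together, the reworked helpers reduce a test's setup to two calls plus a single closer for teardown. A sketch of a consumer, where kvstore.NewApplication stands in for any ABCI application and is an assumption rather than part of this diff:

```go
// Sketch: drive the updated test helpers from a Go test.
func TestWithRunningNode(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	conf, err := CreateConfig(t.Name()) // now returns an error instead of panicking
	require.NoError(t, err)

	_, closer, err := StartTendermint(ctx, conf, kvstore.NewApplication())
	require.NoError(t, err)
	defer func() { _ = closer(ctx) }()

	// By this point waitForRPC has already seen a successful /status call,
	// so RPC requests against conf.RPC.ListenAddress can proceed.
}
```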
os.RemoveAll(conf.RootDir) return nil }, nil diff --git a/scripts/authors.sh b/scripts/authors.sh index 7aafb0127..49251242e 100755 --- a/scripts/authors.sh +++ b/scripts/authors.sh @@ -1,16 +1,16 @@ #! /bin/bash -# Usage: -# `./authors.sh` -# Print a list of all authors who have committed to develop since master. -# -# `./authors.sh <email>` -# Lookup the email address on Github and print the associated username +set -euo pipefail -author=$1 +ref="${1:-}" -if [[ "$author" == "" ]]; then - git log master..develop | grep Author | sort | uniq +if [[ -n "$ref" ]]; then + git log master.."$ref" | grep Author | sort | uniq else - curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login +cat << EOF +Usage: + ./authors.sh <ref> + Print a list of all authors who have committed to the codebase since the supplied commit ref. +EOF fi + diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index d21dc6c44..6b60ac2fc 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -14,7 +14,7 @@ import ( "os" "strings" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -41,7 +41,7 @@ func main() { // because of the byte array in BlockPart // leading to unmarshal error: unexpected end of JSON input br := bufio.NewReaderSize(f, int(2*types.BlockPartSizeBytes)) - dec := cs.NewWALEncoder(walFile) + dec := consensus.NewWALEncoder(walFile) for { msgJSON, _, err := br.ReadLine() @@ -55,7 +55,7 @@ continue } - var msg cs.TimedWALMessage + var msg consensus.TimedWALMessage err = tmjson.Unmarshal(msgJSON, &msg) if err != nil { panic(fmt.Errorf("failed to unmarshal json: %v", err)) diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 51b1cc6d3..8e121448b 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -1,9 +1,44 @@ #!/usr/bin/env bash +set -euo pipefail -set -eo pipefail +# By default, this script runs against the latest commit to the master branch +# in the Tendermint spec repository. To use this script with a different version +# of the spec repository, run it with the $VERS environment variable set to the +# desired branch name or commit hash from the spec repo. -buf generate --path proto/tendermint +: ${VERS:=master} + +echo "fetching proto files" + +# Get shortened ref of commit +REF=$(curl -H "Accept: application/vnd.github.v3.sha" -qL \ + "https://api.github.com/repos/tendermint/spec/commits/${VERS}" \ + | cut -c -7) + +readonly OUTDIR="tendermint-spec-${REF}" +curl -qL "https://api.github.com/repos/tendermint/spec/tarball/${REF}" | tar -xzf - ${OUTDIR}/ + +cp -r ${OUTDIR}/proto/tendermint/* ./proto/tendermint +cp -r ${OUTDIR}/third_party/** ./third_party + +MODNAME="$(go list -m)" +find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \ + -exec sh ./scripts/protopackage.sh {} "$MODNAME" ';' + +# For historical compatibility, the abci file needs to get a slightly different import name +# so that it can be moved into the ./abci/types directory.
+sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types" + +buf generate --path proto/tendermint --template ./${OUTDIR}/buf.gen.yaml --config ./${OUTDIR}/buf.yaml mv ./proto/tendermint/abci/types.pb.go ./abci/types -mv ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc +echo "proto files have been compiled" + +echo "removing copied files" + +find ${OUTDIR}/proto/tendermint/ -name *.proto \ + | sed "s/$OUTDIR\/\(.*\)/\1/g" \ + | xargs -I {} rm {} + +rm -rf ${OUTDIR} diff --git a/scripts/protopackage.sh b/scripts/protopackage.sh new file mode 100755 index 000000000..a69e758ca --- /dev/null +++ b/scripts/protopackage.sh @@ -0,0 +1,23 @@ +#!/usr/bin/sh +set -eo pipefail + +# This script appends the "option go_package" proto option to the file located at $FNAME. +# This option specifies what the package will be named when imported by other packages. +# This option is not directly included in the proto files to allow the files to more easily +# be hosted in github.com/tendermint/spec and shared between other repos. +# If the option is already specified in the file, it will be replaced using the +# arguments passed to this script. + +FNAME="${1:?missing required .proto filename}" +MODNAME=$(echo $2| sed 's/\//\\\//g') +PACKAGE="$(dirname $FNAME | sed 's/^\.\/\(.*\)/\1/g' | sed 's/\//\\\//g')" +if [[ ! -z "$3" ]]; then + PACKAGE="$(echo $3 | sed 's/\//\\\//g')" +fi + + +if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then + sed -i "s/\(package tendermint.*\)/\1\n\noption go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +else + sed -i "s/option\s\+go_package\s\+=\s\+.*;/option go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +fi diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 886e5402f..5a5a0abac 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -12,7 +12,7 @@ import ( "io" "os" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" ) @@ -28,7 +28,7 @@ func main() { } defer f.Close() - dec := cs.NewWALDecoder(f) + dec := consensus.NewWALDecoder(f) for { msg, err := dec.Decode() if err == io.EOF { @@ -48,7 +48,7 @@ func main() { } if err == nil { - if endMsg, ok := msg.Msg.(cs.EndHeightMessage); ok { + if endMsg, ok := msg.Msg.(consensus.EndHeightMessage); ok { _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", endMsg.Height))) } } diff --git a/state/indexer/indexer_service.go b/state/indexer/indexer_service.go deleted file mode 100644 index 39a1847f8..000000000 --- a/state/indexer/indexer_service.go +++ /dev/null @@ -1,137 +0,0 @@ -package indexer - -import ( - "context" - - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// XXX/TODO: These types should be moved to the indexer package. - -const ( - subscriber = "IndexerService" -) - -// Service connects event bus, transaction and block indexers together in -// order to index transactions and blocks coming from the event bus. -type Service struct { - service.BaseService - - eventSinks []EventSink - eventBus *types.EventBus -} - -// NewIndexerService returns a new service instance. -func NewIndexerService(es []EventSink, eventBus *types.EventBus) *Service { - - is := &Service{eventSinks: es, eventBus: eventBus} - is.BaseService = *service.NewBaseService(nil, "IndexerService", is) - return is -} - -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. 
-func (is *Service) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // canceled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. - blockHeadersSub, err := is.eventBus.SubscribeUnbuffered( - context.Background(), - subscriber, - types.EventQueryNewBlockHeader) - if err != nil { - return err - } - - txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx) - if err != nil { - return err - } - - go func() { - for { - select { - case <-blockHeadersSub.Canceled(): - return - case msg := <-blockHeadersSub.Out(): - - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) - - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult - - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } - - if !IndexingEnabled(is.eventSinks) { - continue - } - - for _, sink := range is.eventSinks { - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) - } - - if len(batch.Ops) > 0 { - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) - } else { - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) - } - } - } - } - } - }() - return nil -} - -// OnStop implements service.Service by unsubscribing from all transactions and -// close the eventsink. -func (is *Service) OnStop() { - if is.eventBus.IsRunning() { - _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) - } - - for _, sink := range is.eventSinks { - if err := sink.Stop(); err != nil { - is.Logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) - } - } -} - -// KVSinkEnabled returns the given eventSinks is containing KVEventSink. -func KVSinkEnabled(sinks []EventSink) bool { - for _, sink := range sinks { - if sink.Type() == KV { - return true - } - } - - return false -} - -// IndexingEnabled returns the given eventSinks is supporting the indexing services. 
-func IndexingEnabled(sinks []EventSink) bool { - for _, sink := range sinks { - if sink.Type() == KV || sink.Type() == PSQL { - return true - } - } - - return false -} diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go deleted file mode 100644 index 73022aaf8..000000000 --- a/test/app/grpc_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "context" - - tmjson "github.com/tendermint/tendermint/libs/json" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" -) - -var grpcAddr = "tcp://localhost:36656" - -func main() { - args := os.Args - if len(args) == 1 { - fmt.Println("Must enter a transaction to send (hex)") - os.Exit(1) - } - tx := args[1] - txBytes, err := hex.DecodeString(tx) - if err != nil { - fmt.Println("Invalid hex", err) - os.Exit(1) - } - - clientGRPC := coregrpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - bz, err := tmjson.Marshal(res) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println(string(bz)) -} diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 2b41cc1cd..23cf4d039 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -3,11 +3,8 @@ all: docker generator runner tests docker: docker build --tag tendermint/e2e-node -f docker/Dockerfile ../.. -# We need to build support for database backends into the app in -# order to build a binary with a Tendermint node in it (for built-in -# ABCI testing). -app: - go build -o build/app -tags badgerdb,boltdb,cleveldb,rocksdb ./app +node: + go build -o build/node -tags badgerdb,boltdb,cleveldb,rocksdb ./node generator: go build -o build/generator ./generator @@ -18,4 +15,4 @@ runner: tests: go test -o build/tests ./tests -.PHONY: all app docker generator runner tests +.PHONY: all docker generator runner tests node diff --git a/test/e2e/README.md b/test/e2e/README.md index d737120c1..00bce5ad8 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -142,10 +142,42 @@ Docker does not enable IPv6 by default. To do so, enter the following in } ``` -## Benchmarking testnets +## Benchmarking Testnets It is also possible to run a simple benchmark on a testnet. This is done through the `benchmark` command. This manages the entire process: setting up the environment, starting the test net, waiting for a considerable amount of blocks to be used (currently 100), and then returning the following metrics from the sample of the blockchain: - Average time to produce a block - Standard deviation of producing a block - Minimum and maximum time to produce a block + +## Running Individual Nodes + +The E2E test harness is designed to run several nodes of varying configurations within Docker. It is also possible to run a single node, for example as part of a larger, geographically-dispersed testnet. To run a single node, use one of the following: + +**Built-in** + +```bash +make node +tendermint init validator +TMHOME=$HOME/.tendermint ./build/node ./node/built-in.toml +``` + +To make things simpler, the e2e application can also be run from within the tendermint binary +by running + +```bash +tendermint start --proxy-app e2e +``` + +However, this won't offer the same level of configurability for the application. + +**Socket** + +```bash +make node +tendermint init validator +tendermint start +./build/node ./node/socket.toml +``` + +Check `node/config.go` to see how the settings of the test application can be tweaked.
\ No newline at end of file diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 26b10d32a..395c87024 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -1,4 +1,4 @@ -package main +package app import ( "bytes" @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -27,6 +28,55 @@ type Application struct { restoreChunks [][]byte } +// Config allows for the setting of high level parameters for running the e2e Application +// KeyType and ValidatorUpdates must be the same for all nodes running the same application. +type Config struct { + // The directory with which state.json will be persisted in. Usually $HOME/.tendermint/data + Dir string `toml:"dir"` + + // SnapshotInterval specifies the height interval at which the application + // will take state sync snapshots. Defaults to 0 (disabled). + SnapshotInterval uint64 `toml:"snapshot_interval"` + + // RetainBlocks specifies the number of recent blocks to retain. Defaults to + // 0, which retains all blocks. Must be greater that PersistInterval, + // SnapshotInterval and EvidenceAgeHeight. + RetainBlocks uint64 `toml:"retain_blocks"` + + // KeyType sets the curve that will be used by validators. + // Options are ed25519 & secp256k1 + KeyType string `toml:"key_type"` + + // PersistInterval specifies the height interval at which the application + // will persist state to disk. Defaults to 1 (every height), setting this to + // 0 disables state persistence. + PersistInterval uint64 `toml:"persist_interval"` + + // ValidatorUpdates is a map of heights to validator names and their power, + // and will be returned by the ABCI application. For example, the following + // changes the power of validator01 and validator02 at height 1000: + // + // [validator_update.1000] + // validator01 = 20 + // validator02 = 10 + // + // Specifying height 0 returns the validator update during InitChain. The + // application returns the validator updates as-is, i.e. removing a + // validator must be done by returning it with power 0, and any validators + // not specified are not changed. + // + // height <-> pubkey <-> voting power + ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` +} + +func DefaultConfig(dir string) *Config { + return &Config{ + PersistInterval: 1, + SnapshotInterval: 100, + Dir: dir, + } +} + // NewApplication creates the application. 
func NewApplication(cfg *Config) (*Application, error) { state, err := NewState(filepath.Join(cfg.Dir, "state.json"), cfg.PersistInterval) @@ -67,6 +117,11 @@ func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitCh } resp := abci.ResponseInitChain{ AppHash: app.state.Hash, + ConsensusParams: &types.ConsensusParams{ + Version: &types.VersionParams{ + AppVersion: 1, + }, + }, } if resp.Validators, err = app.validatorUpdates(0); err != nil { panic(err) @@ -134,7 +189,11 @@ func (app *Application) Commit() abci.ResponseCommit { if err != nil { panic(err) } - logger.Info("Created state sync snapshot", "height", snapshot.Height) + app.logger.Info("Created state sync snapshot", "height", snapshot.Height) + err = app.snapshots.Prune(maxSnapshotCount) + if err != nil { + app.logger.Error("Failed to prune snapshots", "err", err) + } } retainHeight := int64(0) if app.cfg.RetainBlocks > 0 { diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ddb7ecdc..65edbc3a5 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -1,11 +1,10 @@ // nolint: gosec -package main +package app import ( "encoding/json" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -16,6 +15,9 @@ import ( const ( snapshotChunkSize = 1e6 + + // Keep only the most recent 10 snapshots. Older snapshots are pruned + maxSnapshotCount = 10 ) // SnapshotStore stores state sync snapshots. Snapshots are stored simply as @@ -45,7 +47,7 @@ func (s *SnapshotStore) loadMetadata() error { file := filepath.Join(s.dir, "metadata.json") metadata := []abci.Snapshot{} - bz, err := ioutil.ReadFile(file) + bz, err := os.ReadFile(file) switch { case errors.Is(err, os.ErrNotExist): case err != nil: @@ -72,7 +74,7 @@ func (s *SnapshotStore) saveMetadata() error { // save the file to a new file and move it to make saving atomic. newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0644) // nolint: gosec if err != nil { return err } @@ -93,7 +95,7 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { Hash: hashItems(state.Values), Chunks: byteChunks(bz), } - err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) if err != nil { return abci.Snapshot{}, err } @@ -105,6 +107,27 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { return snapshot, nil } +// Prune removes old snapshots ensuring only the most recent n snapshots remain +func (s *SnapshotStore) Prune(n int) error { + s.Lock() + defer s.Unlock() + // snapshots are appended to the metadata struct, hence pruning removes from + // the front of the array + i := 0 + for ; i < len(s.metadata)-n; i++ { + h := s.metadata[i].Height + if err := os.Remove(filepath.Join(s.dir, fmt.Sprintf("%v.json", h))); err != nil { + return err + } + } + + // update metadata by removing the deleted snapshots + pruned := make([]abci.Snapshot, len(s.metadata[i:])) + copy(pruned, s.metadata[i:]) + s.metadata = pruned + return nil +} + // List lists available snapshots. 
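To make the pruning arithmetic above concrete: snapshots are appended to s.metadata in creation order, so with maxSnapshotCount = 10 and 15 recorded snapshots, Prune(10) removes the five oldest entries (indices 0 through 4) together with their corresponding <height>.json files, then copies the surviving tail into a fresh slice so the backing array no longer references the deleted metadata.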
func (s *SnapshotStore) List() ([]*abci.Snapshot, error) { s.RLock() @@ -122,7 +145,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([ defer s.RUnlock() for _, snapshot := range s.metadata { if snapshot.Height == height && snapshot.Format == format { - bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) if err != nil { return nil, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index b34680c1b..7376b8776 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -1,12 +1,11 @@ //nolint: gosec -package main +package app import ( "crypto/sha256" "encoding/json" "errors" "fmt" - "io/ioutil" "os" "sort" "sync" @@ -45,7 +44,7 @@ func NewState(file string, persistInterval uint64) (*State, error) { // load loads state from disk. It does not take out a lock, since it is called // during construction. func (s *State) load() error { - bz, err := ioutil.ReadFile(s.file) + bz, err := os.ReadFile(s.file) if err != nil { return fmt.Errorf("failed to read state from %q: %w", s.file, err) } @@ -66,7 +65,7 @@ func (s *State) save() error { // We write the state to a separate file and move it to the destination, to // make it atomic. newFile := fmt.Sprintf("%v.new", s.file) - err = ioutil.WriteFile(newFile, bz, 0644) + err = os.WriteFile(newFile, bz, 0644) if err != nil { return fmt.Errorf("failed to write state to %q: %w", s.file, err) } diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 68c7bc836..260df23f3 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -19,7 +19,7 @@ COPY . . RUN make build && cp build/tendermint /usr/bin/tendermint COPY test/e2e/docker/entrypoint* /usr/bin/ -RUN cd test/e2e && make app && cp build/app /usr/bin/app +RUN cd test/e2e && make node && cp build/node /usr/bin/app # Set up runtime directory. We don't use a separate runtime image since we need # e.g. leveldb and rocksdb which are already installed in the build image. diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 2d6945e65..d4b581928 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,8 +15,7 @@ var ( // separate testnet for each combination (Cartesian product) of options. 
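Both the snapshot metadata and State.save use the write-then-rename idiom called out in the comments above: a rename within the same filesystem is atomic, so a concurrent reader sees either the old file or the new one, never a torn write. A standalone sketch of the idiom:

```go
package main

import "os"

// atomicWrite sketches the save pattern used above: write the payload to a
// sibling temp file, then rename it over the destination in one step.
func atomicWrite(path string, data []byte) error {
	tmp := path + ".new"
	if err := os.WriteFile(tmp, data, 0644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := atomicWrite("state.json", []byte(`{"height": 1}`)); err != nil {
		panic(err)
	}
}
```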
testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode}, - "queueType": {"priority"}, // "fifo", "wdrr" + "queueType": {"priority"}, // "fifo" "initialHeight": {0, 1000}, "initialState": { map[string]string{}, @@ -45,21 +44,26 @@ var ( "tcp": 20, "unix": 10, } - // FIXME: v2 disabled due to flake - nodeBlockSyncs = uniformChoice{"v0"} // "v2" - nodeMempools = uniformChoice{"v0", "v1"} - nodeStateSyncs = uniformChoice{e2e.StateSyncDisabled, e2e.StateSyncP2P, e2e.StateSyncRPC} + nodeStateSyncs = weightedChoice{ + e2e.StateSyncDisabled: 10, + e2e.StateSyncP2P: 45, + e2e.StateSyncRPC: 45, + } nodePersistIntervals = uniformChoice{0, 1, 5} - nodeSnapshotIntervals = uniformChoice{0, 3} - nodeRetainBlocks = uniformChoice{0, 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight)} - nodePerturbations = probSetChoice{ + nodeSnapshotIntervals = uniformChoice{0, 5} + nodeRetainBlocks = uniformChoice{ + 0, + 2 * int(e2e.EvidenceAgeHeight), + 4 * int(e2e.EvidenceAgeHeight), + } + nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, "kill": 0.1, "restart": 0.1, } evidence = uniformChoice{0, 1, 10} - txSize = uniformChoice{1024, 10240} // either 1kb or 10kb + txSize = uniformChoice{1024, 4096} // either 1kb or 4kb ipv6 = uniformChoice{false, true} keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} ) @@ -67,12 +71,6 @@ var ( // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} - switch opts.P2P { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - testnetCombinations["p2p"] = []interface{}{opts.P2P} - default: - testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} - } for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -80,42 +78,33 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { return nil, err } - if len(manifest.Nodes) == 1 { - if opt["p2p"] == HybridP2PMode { - continue - } + if len(manifest.Nodes) < opts.MinNetworkSize { + continue } - manifests = append(manifests, manifest) - } - if opts.Sorted { - // When the sorted flag is set (generally, as long as - // groups aren't set), - e2e.SortManifests(manifests) + if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { + continue + } + + manifests = append(manifests, manifest) } return manifests, nil } type Options struct { - P2P P2PMode - Sorted bool + MinNetworkSize int + MaxNetworkSize int + NumGroups int + Directory string + Reverse bool } -type P2PMode string - -const ( - NewP2PMode P2PMode = "new" - LegacyP2PMode P2PMode = "legacy" - HybridP2PMode P2PMode = "hybrid" - // mixed means that all combination are generated - MixedP2PMode P2PMode = "mixed" -) - // generateTestnet generates a single testnet with the given options. 
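The nodeStateSyncs weights above sum to 100, so they read directly as percentages: roughly 10% of late-starting nodes leave state sync disabled, with the remainder split evenly between the P2P and RPC variants. The weightedChoice.Choose body is not shown in this diff; an illustrative (not the repo's) weighted selection, where each key wins with probability weight/total regardless of map iteration order:

```go
package main

import (
	"fmt"
	"math/rand"
)

// choose draws n uniformly from [0, total) and walks the weights until the
// draw falls inside one key's band.
func choose(r *rand.Rand, weights map[string]uint) string {
	var total uint
	for _, w := range weights {
		total += w
	}
	n := uint(r.Intn(int(total)))
	for k, w := range weights {
		if n < w {
			return k
		}
		n -= w
	}
	return "" // unreachable for non-empty weights
}

func main() {
	r := rand.New(rand.NewSource(1))
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[choose(r, map[string]uint{"disabled": 10, "p2p": 45, "rpc": 45})]++
	}
	fmt.Println(counts) // roughly 100 / 450 / 450
}
```

Note that a generator meant to be reproducible from a fixed seed would also need a stable iteration order, which a bare Go map does not give.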
func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), + ABCIProtocol: nodeABCIProtocols.Choose(r), InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, @@ -127,13 +116,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er TxSize: int64(txSize.Choose(r).(int)), } - p2pMode := opt["p2p"].(P2PMode) - switch p2pMode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - default: - return manifest, fmt.Errorf("unknown p2p mode %s", p2pMode) - } - var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": @@ -142,8 +124,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er numValidators = 4 case "large": // FIXME Networks are kept small since large ones use too much CPU. - numSeeds = r.Intn(2) - numLightClients = r.Intn(3) + numSeeds = r.Intn(1) + numLightClients = r.Intn(2) numValidators = 4 + r.Intn(4) numFulls = r.Intn(4) default: @@ -152,18 +134,12 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { - node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false) - - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 - } - + node := generateNode(r, manifest, e2e.ModeSeed, 0, false) manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node } + var numSyncingNodes = 0 + // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. We also set up // the initial validator set, and validator set updates for delayed nodes. @@ -171,20 +147,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er quorum := numValidators*2/3 + 1 for i := 1; i <= numValidators; i++ { startAt := int64(0) - if i > quorum { + if i > quorum && numSyncingNodes < 2 && r.Float64() >= 0.25 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - node := generateNode( - r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) - - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 - } + node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2) manifest.Nodes[name] = node @@ -210,18 +179,12 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // Finally, we generate random full nodes. 
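A quick check on the quorum arithmetic in generateTestnet above: with integer division, 4 validators give quorum = 4*2/3 + 1 = 2 + 1 = 3, and 7 validators give 14/3 + 1 = 4 + 1 = 5, matching the usual BFT requirement of more than two-thirds. Only validators past that index may be scheduled to start late, and the new numSyncingNodes counter further caps late starters at two per testnet.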
for i := 1; i <= numFulls; i++ { startAt := int64(0) - if r.Float64() >= 0.5 { + if numSyncingNodes < 2 && r.Float64() >= 0.5 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } - node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false) - - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 - } + node := generateNode(r, manifest, e2e.ModeFull, startAt, false) manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } @@ -263,19 +226,32 @@ } }) for i, name := range peerNames { - if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { + // Use a seed node when the network has seeds, this node is not + // state syncing, and it is either the first peer in the sort order + // or lands in the random half of the remaining peers; otherwise, + // about half of the later peers connect to a random subset (at + // least two) of the peers defined before them. + + if len(seedNames) > 0 && + manifest.Nodes[name].StateSync == e2e.StateSyncDisabled && + (i == 0 || r.Float64() >= 0.5) { + + // choose one of the seeds manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) - } else if i > 0 { - manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) + } else if i > 1 && r.Float64() >= 0.5 { + peers := uniformSetChoice(peerNames[:i]) + manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) } } // lastly, set up the light clients for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 - manifest.Nodes[fmt.Sprintf("light%02d", i)] = generateLightNode( - r, startAt+(5*int64(i)), lightProviders, - ) + + node := generateLightNode(r, startAt+(5*int64(i)), lightProviders) + + manifest.Nodes[fmt.Sprintf("light%02d", i)] = node + } return manifest, nil @@ -286,16 +262,17 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // here, since we need to know the overall network topology and startup // sequencing. func generateNode( - r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, + manifest e2e.Manifest, + mode e2e.Mode, + startAt int64, + forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Mode: string(mode), StartAt: startAt, Database: nodeDatabases.Choose(r), - ABCIProtocol: nodeABCIProtocols.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), - BlockSync: nodeBlockSyncs.Choose(r).(string), - Mempool: nodeMempools.Choose(r).(string), StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -303,8 +280,19 @@ func generateNode( Perturb: nodePerturbations.Choose(r), } + if node.PrivvalProtocol == "" { + node.PrivvalProtocol = "file" + } + if startAt > 0 { - node.StateSync = nodeStateSyncs.Choose(r).(string) + node.StateSync = nodeStateSyncs.Choose(r) + if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled { + // avoid needing to block sync more than five total blocks.
+ node.StateSync = uniformSetChoice([]string{ + e2e.StateSyncP2P, + e2e.StateSyncRPC, + }).Choose(r)[0] + } } // If this node is forced to be an archive node, retain all blocks and @@ -335,10 +323,6 @@ func generateNode( } } - if node.StateSync != e2e.StateSyncDisabled { - node.BlockSync = "v0" - } - return &node } @@ -347,7 +331,6 @@ func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.Man Mode: string(e2e.ModeLight), StartAt: startAt, Database: nodeDatabases.Choose(r), - ABCIProtocol: "builtin", PersistInterval: ptrUint64(0), PersistentPeers: providers, } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go new file mode 100644 index 000000000..0e0e66baa --- /dev/null +++ b/test/e2e/generator/generate_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +func TestGenerator(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{}) + require.NoError(t, err) + require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests)) + + // this just means that the numbers reported by the test + // failures map to the test cases that you'd see locally. + e2e.SortManifests(manifests, false /* ascending */) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + numStateSyncs := 0 + for name, node := range m.Nodes { + if node.StateSync != e2e.StateSyncDisabled { + numStateSyncs++ + } + t.Run(name, func(t *testing.T) { + t.Run("StateSync", func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, + "peers: %v", node.PersistentPeers) + } + }) + if e2e.Mode(node.Mode) != e2e.ModeLight { + t.Run("PrivvalProtocol", func(t *testing.T) { + require.NotZero(t, node.PrivvalProtocol) + }) + } + }) + } + require.True(t, numStateSyncs <= 2) + }) + } +} diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 7dd096760..38f36d0da 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" - "math" "math/rand" "os" "path/filepath" @@ -11,6 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) const ( @@ -26,6 +26,7 @@ func main() { // CLI is the Cobra-based command-line interface. type CLI struct { root *cobra.Command + opts Options } // NewCLI sets up the CLI. 
@@ -37,73 +38,54 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - dir, err := cmd.Flags().GetString("dir") - if err != nil { - return err - } - groups, err := cmd.Flags().GetInt("groups") - if err != nil { - return err - } - p2pMode, err := cmd.Flags().GetString("p2p") - if err != nil { - return err - } - var opts Options - switch mode := P2PMode(p2pMode); mode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - opts = Options{P2P: mode} - default: - return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) - } - - if groups == 0 { - opts.Sorted = true - } - - return cli.generate(dir, groups, opts) + return cli.generate() }, } - cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests") + cli.root.PersistentFlags().StringVarP(&cli.opts.Directory, "dir", "d", "", "Output directory for manifests") _ = cli.root.MarkPersistentFlagRequired("dir") - cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") - cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), - "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") + cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") + cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") + cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, + "Minimum network size (nodes)") + cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, + "Maximum network size (nodes), 0 is unlimited") return cli } // generate generates manifests in a directory. -func (cli *CLI) generate(dir string, groups int, opts Options) error { - err := os.MkdirAll(dir, 0755) +func (cli *CLI) generate() error { + err := os.MkdirAll(cli.opts.Directory, 0755) if err != nil { return err } - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), opts) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { return err } - if groups <= 0 { - for i, manifest := range manifests { - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-%04d.toml", i))) - if err != nil { + + switch { + case cli.opts.NumGroups <= 0: + e2e.SortManifests(manifests, cli.opts.Reverse) + + if err := e2e.WriteManifests(filepath.Join(cli.opts.Directory, "gen"), manifests); err != nil { + return err + } + default: + groupManifests := e2e.SplitGroups(cli.opts.NumGroups, manifests) + + for idx, gm := range groupManifests { + e2e.SortManifests(gm, cli.opts.Reverse) + + prefix := filepath.Join(cli.opts.Directory, fmt.Sprintf("gen-group%02d", idx)) + if err := e2e.WriteManifests(prefix, gm); err != nil { return err } } - } else { - groupSize := int(math.Ceil(float64(len(manifests)) / float64(groups))) - for g := 0; g < groups; g++ { - for i := 0; i < groupSize && g*groupSize+i < len(manifests); i++ { - manifest := manifests[g*groupSize+i] - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-group%02d-%04d.toml", g, i))) - if err != nil { - return err - } - } - } } + return nil } diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index d6c84d46c..c00d56964 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -74,18 +74,24 @@ func (pc probSetChoice) Choose(r *rand.Rand) []string { // uniformSetChoice picks a set of strings with uniform probability, picking at least one.
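Given the flags registered above, a typical invocation might now look like `./build/generator -d networks/nightly -g 2 --min-size 2 --max-size 8`, which writes two sorted manifest groups under `networks/nightly` with `gen-group00`/`gen-group01` prefixes, keeping only generated networks with at least two and, per the exclusive upper bound in Generate, fewer than eight nodes; omitting `-g` emits a single sorted `gen` set instead.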
type uniformSetChoice []string -func (usc uniformSetChoice) Choose(r *rand.Rand) []string { +func (usc uniformSetChoice) Choose(r *rand.Rand) []string { return usc.ChooseAtLeast(r, 1) } + +func (usc uniformSetChoice) ChooseAtLeast(r *rand.Rand, num int) []string { choices := []string{} indexes := r.Perm(len(usc)) - if len(indexes) > 1 { - indexes = indexes[:1+r.Intn(len(indexes)-1)] + if num < len(indexes) { + indexes = indexes[:1+randomInRange(r, num, len(indexes)-1)] } + for _, i := range indexes { choices = append(choices, usc[i]) } + return choices } +func randomInRange(r *rand.Rand, min, max int) int { return r.Intn(max-min+1) + min } + type weightedChoice map[string]uint func (wc weightedChoice) Choose(r *rand.Rand) string { diff --git a/test/e2e/generator/random_test.go b/test/e2e/generator/random_test.go index 3fbb19ab5..48b04f2d1 100644 --- a/test/e2e/generator/random_test.go +++ b/test/e2e/generator/random_test.go @@ -1,9 +1,12 @@ package main import ( + "fmt" + "math/rand" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCombinations(t *testing.T) { @@ -29,3 +32,28 @@ func TestCombinations(t *testing.T) { {"bool": true, "int": 3, "string": "bar"}, }, c) } + +func TestUniformSetChoice(t *testing.T) { + set := uniformSetChoice([]string{"a", "b", "c"}) + r := rand.New(rand.NewSource(2384)) + + for i := 0; i < 100; i++ { + t.Run(fmt.Sprintf("Iteration%03d", i), func(t *testing.T) { + set = append(set, t.Name()) + + t.Run("ChooseAtLeastSubset", func(t *testing.T) { + require.True(t, len(set.ChooseAtLeast(r, 1)) >= 1) + require.True(t, len(set.ChooseAtLeast(r, 2)) >= 2) + require.True(t, len(set.ChooseAtLeast(r, len(set)/2)) >= len(set)/2) + }) + t.Run("ChooseAtLeastEqualOrGreaterToLength", func(t *testing.T) { + require.Len(t, set.ChooseAtLeast(r, len(set)), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)+1), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)*10), len(set)) + }) + t.Run("ChooseSingle", func(t *testing.T) { + require.True(t, len(set.Choose(r)) >= 1) + }) + }) + } +} diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 7e07febd5..f73a18859 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,7 +1,6 @@ # This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. -disable_legacy_p2p = false evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} @@ -35,6 +34,7 @@ perturb = ["restart"] perturb = ["disconnect"] seeds = ["seed01"] snapshot_interval = 5 +block_sync = "v0" [node.validator02] abci_protocol = "tcp" diff --git a/test/e2e/node/built-in.toml b/test/e2e/node/built-in.toml new file mode 100644 index 000000000..0a2146a58 --- /dev/null +++ b/test/e2e/node/built-in.toml @@ -0,0 +1,4 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "builtin" diff --git a/test/e2e/app/config.go b/test/e2e/node/config.go similarity index 80% rename from test/e2e/app/config.go rename to test/e2e/node/config.go index d7e776538..fa7dcc497 100644 --- a/test/e2e/app/config.go +++ b/test/e2e/node/config.go @@ -6,6 +6,8 @@ import ( "fmt" "github.com/BurntSushi/toml" + + "github.com/tendermint/tendermint/test/e2e/app" ) // Config is the application configuration. 
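The bounds of randomInRange above are inclusive on both ends: randomInRange(r, 2, 4) evaluates r.Intn(3) + 2 and so yields 2, 3, or 4. In ChooseAtLeast, when num < len(usc) the permuted index slice keeps 1 + randomInRange(r, num, len-1) entries, i.e. between num+1 and len, which satisfies the at-least-num contract; when num >= len(usc) the whole set is returned, exactly what the ChooseAtLeastEqualOrGreaterToLength cases in the test above assert.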
@@ -22,10 +24,21 @@ type Config struct {
 	PrivValServer    string            `toml:"privval_server"`
 	PrivValKey       string            `toml:"privval_key"`
 	PrivValState     string            `toml:"privval_state"`
-	Misbehaviors     map[string]string `toml:"misbehaviors"`
 	KeyType          string            `toml:"key_type"`
 }

+// App extracts the application-specific configuration parameters.
+func (cfg *Config) App() *app.Config {
+	return &app.Config{
+		Dir:              cfg.Dir,
+		SnapshotInterval: cfg.SnapshotInterval,
+		RetainBlocks:     cfg.RetainBlocks,
+		KeyType:          cfg.KeyType,
+		ValidatorUpdates: cfg.ValidatorUpdates,
+		PersistInterval:  cfg.PersistInterval,
+	}
+}
+
 // LoadConfig loads the configuration from disk.
 func LoadConfig(file string) (*Config, error) {
 	cfg := &Config{
diff --git a/test/e2e/app/main.go b/test/e2e/node/main.go
similarity index 86%
rename from test/e2e/app/main.go
rename to test/e2e/node/main.go
index fd464220d..5d25b0195 100644
--- a/test/e2e/app/main.go
+++ b/test/e2e/node/main.go
@@ -14,6 +14,7 @@ import (
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"

+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/server"
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto/ed25519"
@@ -28,8 +29,8 @@ import (
 	"github.com/tendermint/tendermint/privval"
 	grpcprivval "github.com/tendermint/tendermint/privval/grpc"
 	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
-	"github.com/tendermint/tendermint/proxy"
 	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
+	"github.com/tendermint/tendermint/test/e2e/app"
 	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
 )

@@ -37,6 +38,9 @@ var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, fals

 // main is the binary entrypoint.
 func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	if len(os.Args) != 2 {
 		fmt.Printf("Usage: %v <configfile>", os.Args[0])
 		return
@@ -46,14 +50,14 @@ func main() {
 		configFile = os.Args[1]
 	}

-	if err := run(configFile); err != nil {
+	if err := run(ctx, configFile); err != nil {
 		logger.Error(err.Error())
 		os.Exit(1)
 	}
 }

 // run runs the application - basically like main() with error handling.
-func run(configFile string) error {
+func run(ctx context.Context, configFile string) error {
 	cfg, err := LoadConfig(configFile)
 	if err != nil {
 		return err
 	}
@@ -61,7 +65,7 @@ func run(configFile string) error {
 	// Start remote signer (must start before node if running builtin).
 	if cfg.PrivValServer != "" {
-		if err = startSigner(cfg); err != nil {
+		if err = startSigner(ctx, cfg); err != nil {
 			return err
 		}
 		if cfg.Protocol == "builtin" {
@@ -72,15 +76,15 @@ func run(configFile string) error {
 	// Start app server.
 	switch cfg.Protocol {
 	case "socket", "grpc":
-		err = startApp(cfg)
+		err = startApp(ctx, cfg)
 	case "builtin":
 		switch cfg.Mode {
 		case string(e2e.ModeLight):
-			err = startLightNode(cfg)
+			err = startLightNode(ctx, cfg)
 		case string(e2e.ModeSeed):
-			err = startSeedNode(cfg)
+			err = startSeedNode(ctx)
 		default:
-			err = startNode(cfg)
+			err = startNode(ctx, cfg)
 		}
 	default:
 		err = fmt.Errorf("invalid protocol %q", cfg.Protocol)
 	}
@@ -96,16 +100,16 @@ func run(configFile string) error {
 }

 // startApp starts the application server, listening for connections from Tendermint.
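+// The transport is selected by cfg.Protocol ("socket" or "grpc"),
+// matching the cases handled in run above.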
-func startApp(cfg *Config) error { - app, err := NewApplication(cfg) +func startApp(ctx context.Context, cfg *Config) error { + app, err := app.NewApplication(cfg.App()) if err != nil { return err } - server, err := server.NewServer(cfg.Listen, cfg.Protocol, app) + server, err := server.NewServer(logger, cfg.Listen, cfg.Protocol, app) if err != nil { return err } - err = server.Start() + err = server.Start(ctx) if err != nil { return err } @@ -117,8 +121,8 @@ func startApp(cfg *Config) error { // configuration is in $TMHOME/config/tendermint.toml. // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. -func startNode(cfg *Config) error { - app, err := NewApplication(cfg) +func startNode(ctx context.Context, cfg *Config) error { + app, err := app.NewApplication(cfg.App()) if err != nil { return err } @@ -128,18 +132,20 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } - n, err := node.New(tmcfg, + n, err := node.New( + ctx, + tmcfg, nodeLogger, - proxy.NewLocalClientCreator(app), + abciclient.NewLocalCreator(app), nil, ) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startSeedNode(cfg *Config) error { +func startSeedNode(ctx context.Context) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) @@ -147,14 +153,14 @@ func startSeedNode(cfg *Config) error { tmcfg.Mode = config.ModeSeed - n, err := node.New(tmcfg, nodeLogger, nil, nil) + n, err := node.New(ctx, tmcfg, nodeLogger, nil, nil) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startLightNode(cfg *Config) error { +func startLightNode(ctx context.Context, cfg *Config) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return err @@ -203,7 +209,7 @@ func startLightNode(cfg *Config) error { } logger.Info("Starting proxy...", "laddr", tmcfg.RPC.ListenAddress) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } @@ -212,7 +218,7 @@ func startLightNode(cfg *Config) error { } // startSigner starts a signer server connecting to the given endpoint. -func startSigner(cfg *Config) error { +func startSigner(ctx context.Context, cfg *Config) error { filePV, err := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState) if err != nil { return err @@ -250,7 +256,8 @@ func startSigner(cfg *Config) error { endpoint := privval.NewSignerDialerEndpoint(logger, dialFn, privval.SignerDialerEndpointRetryWaitInterval(1*time.Second), privval.SignerDialerEndpointConnRetries(100)) - err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start() + + err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start(ctx) if err != nil { return err } diff --git a/test/e2e/node/socket.toml b/test/e2e/node/socket.toml new file mode 100644 index 000000000..2f7913e62 --- /dev/null +++ b/test/e2e/node/socket.toml @@ -0,0 +1,5 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "socket" +listen = "tcp://127.0.0.1:26658" diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 2a8f73127..b57b9ac4a 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -65,6 +65,12 @@ type Manifest struct { // Number of bytes per tx. 
Default is 1kb (1024) TxSize int64 + + // ABCIProtocol specifies the protocol used to communicate with the ABCI + // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. + // builtin will build a complete Tendermint node into the application and + // launch it instead of launching a separate Tendermint process. + ABCIProtocol string `toml:"abci_protocol"` } // ManifestNode represents a node in a testnet manifest. @@ -87,12 +93,6 @@ type ManifestNode struct { // "rocksdb", "boltdb", or "badgerdb". Defaults to goleveldb. Database string `toml:"database"` - // ABCIProtocol specifies the protocol used to communicate with the ABCI - // application: "unix", "tcp", "grpc", or "builtin". Defaults to unix. - // builtin will build a complete Tendermint node into the application and - // launch it instead of launching a separate Tendermint process. - ABCIProtocol string `toml:"abci_protocol"` - // PrivvalProtocol specifies the protocol used to sign consensus messages: // "file", "unix", "tcp", or "grpc". Defaults to "file". For tcp and unix, the ABCI // application will launch a remote signer client in a separate goroutine. @@ -104,10 +104,6 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // BlockSync specifies the block sync mode: "" (disable), "v0" or "v2". - // Defaults to disabled. - BlockSync string `toml:"block_sync"` - // Mempool specifies which version of mempool to use. Either "v0" or "v1" Mempool string `toml:"mempool_version"` @@ -145,9 +141,11 @@ type ManifestNode struct { // This is helpful when debugging a specific problem. This overrides the network // level. LogLevel string `toml:"log_level"` +} - // UseLegacyP2P enables use of the legacy p2p layer for this node. - UseLegacyP2P bool `toml:"use_legacy_p2p"` +// Stateless reports whether m is a node that does not own state, including light and seed nodes. +func (m ManifestNode) Stateless() bool { + return m.Mode == string(ModeLight) || m.Mode == string(ModeSeed) } // Save saves the testnet manifest to a file. @@ -170,41 +168,91 @@ func LoadManifest(file string) (Manifest, error) { } // SortManifests orders (in-place) a list of manifests such that the -// manifests will be ordered (vaguely) from least complex to most -// complex. -func SortManifests(manifests []Manifest) { +// manifests will be ordered in terms of complexity (or expected +// runtime). Complexity is determined first by the number of nodes, +// and then by the total number of perturbations in the network. +// +// If reverse is true, then the manifests are ordered with the most +// complex networks before the less complex networks. +func SortManifests(manifests []Manifest, reverse bool) { sort.SliceStable(manifests, func(i, j int) bool { - left, right := manifests[i], manifests[j] - - if len(left.Nodes) < len(right.Nodes) { - return true - } - - if left.InitialHeight < right.InitialHeight { - return true - } - - if left.TxSize < right.TxSize { - return true - } - - if left.Evidence < right.Evidence { - return true - } - + // sort based on a point-based comparison between two + // manifests. var ( - leftPerturb int - rightPerturb int + left = manifests[i] + right = manifests[j] ) + // scores start with 100 points for each node. The + // number of nodes in a network is the most important + // factor in the complexity of the test. 
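+		//
+		// For example, a four-node network with two total
+		// perturbations and one node that starts after genesis
+		// scores 4*100 + 2*2 + 3 = 407 points.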
+		leftScore := len(left.Nodes) * 100
+		rightScore := len(right.Nodes) * 100
+
+		// add two points for every node perturbation, and three
+		// points for every node that starts after genesis.
 		for _, n := range left.Nodes {
-			leftPerturb += len(n.Perturb)
+			leftScore += (len(n.Perturb) * 2)
+
+			if n.StartAt > 0 {
+				leftScore += 3
+			}
 		}
 		for _, n := range right.Nodes {
-			rightPerturb += len(n.Perturb)
+			rightScore += (len(n.Perturb) * 2)
+			if n.StartAt > 0 {
+				rightScore += 3
+			}
 		}

-		return leftPerturb < rightPerturb
+		// add two points if the network has evidence.
+		if left.Evidence > 0 {
+			leftScore += 2
+		}
+		if right.Evidence > 0 {
+			rightScore += 2
+		}
+
+		if left.TxSize > right.TxSize {
+			leftScore++
+		}
+
+		if right.TxSize > left.TxSize {
+			rightScore++
+		}
+
+		if reverse {
+			return leftScore >= rightScore
+		}
+
+		return leftScore < rightScore
 	})
 }
+
+// SplitGroups divides a list of manifests into n groups of roughly
+// equal size.
+func SplitGroups(groups int, manifests []Manifest) [][]Manifest {
+	groupSize := (len(manifests) + groups - 1) / groups
+	splitManifests := make([][]Manifest, 0, groups)
+
+	for i := 0; i < len(manifests); i += groupSize {
+		grp := make([]Manifest, groupSize)
+		n := copy(grp, manifests[i:])
+		splitManifests = append(splitManifests, grp[:n])
+	}
+
+	return splitManifests
+}
+
+// WriteManifests writes a collection of manifests into files with the
+// specified path prefix.
+func WriteManifests(prefix string, manifests []Manifest) error {
+	for i, manifest := range manifests {
+		if err := manifest.Save(fmt.Sprintf("%s-%04d.toml", prefix, i)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go
index b54dd2bf0..3fd9e77a2 100644
--- a/test/e2e/pkg/testnet.go
+++ b/test/e2e/pkg/testnet.go
@@ -71,6 +71,7 @@ type Testnet struct {
 	Evidence         int
 	LogLevel         string
 	TxSize           int64
+	ABCIProtocol     string
 }

 // Node represents a Tendermint node in a testnet.
@@ -83,7 +84,6 @@ type Node struct {
 	IP               net.IP
 	ProxyPort        uint32
 	StartAt          int64
-	BlockSync        string
 	Mempool          string
 	StateSync        string
 	Database         string
@@ -96,7 +96,6 @@ type Node struct {
 	PersistentPeers  []*Node
 	Perturbations    []Perturbation
 	LogLevel         string
-	UseLegacyP2P     bool
 	QueueType        string
 	HasStarted       bool
 }
@@ -141,6 +140,7 @@ func LoadTestnet(file string) (*Testnet, error) {
 		KeyType:          "ed25519",
 		LogLevel:         manifest.LogLevel,
 		TxSize:           manifest.TxSize,
+		ABCIProtocol:     manifest.ABCIProtocol,
 	}
 	if len(manifest.KeyType) != 0 {
 		testnet.KeyType = manifest.KeyType
@@ -151,6 +151,9 @@ func LoadTestnet(file string) (*Testnet, error) {
 	if manifest.InitialHeight > 0 {
 		testnet.InitialHeight = manifest.InitialHeight
 	}
+	if testnet.ABCIProtocol == "" {
+		testnet.ABCIProtocol = string(ProtocolBuiltin)
+	}

 	// Set up nodes, in alphabetical order (IPs and ports get same order).
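+	// Sorting the names keeps the IP and proxy port assignment
+	// deterministic for a given manifest.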
nodeNames := []string{} @@ -170,10 +173,9 @@ func LoadTestnet(file string) (*Testnet, error) { ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", - ABCIProtocol: ProtocolBuiltin, + ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - BlockSync: nodeManifest.BlockSync, Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -182,21 +184,19 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - UseLegacyP2P: nodeManifest.UseLegacyP2P, } - if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this } if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) } + if node.Mode == ModeLight { + node.ABCIProtocol = ProtocolBuiltin + } if nodeManifest.Database != "" { node.Database = nodeManifest.Database } - if nodeManifest.ABCIProtocol != "" { - node.ABCIProtocol = Protocol(nodeManifest.ABCIProtocol) - } if nodeManifest.PrivvalProtocol != "" { node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocol) } @@ -333,11 +333,6 @@ func (n Node) Validate(testnet Testnet) error { } } } - switch n.BlockSync { - case "", "v0", "v2": - default: - return fmt.Errorf("invalid block sync setting %q", n.BlockSync) - } switch n.StateSync { case StateSyncDisabled, StateSyncP2P, StateSyncRPC: default: @@ -349,7 +344,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid mempool version %q", n.Mempool) } switch n.QueueType { - case "", "priority", "wdrr", "fifo": + case "", "priority", "fifo": default: return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType) } diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index c907b4926..192bc1555 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -5,8 +5,8 @@ import ( "context" "errors" "fmt" - "io/ioutil" "math/rand" + "os" "path/filepath" "time" @@ -28,19 +28,15 @@ const lightClientEvidenceRatio = 4 // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of LightClientAttackEvidence and // DuplicateVoteEvidence. 
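+// Roughly one in every lightClientEvidenceRatio pieces of evidence is a
+// light client attack; the rest are duplicate votes.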
-func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error {
+func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amount int) error {
 	// select a random node
 	var targetNode *e2e.Node

-	for _, idx := range rand.Perm(len(testnet.Nodes)) {
-		targetNode = testnet.Nodes[idx]
-
-		if targetNode.Mode == e2e.ModeSeed {
-			targetNode = nil
-			continue
+	for _, idx := range r.Perm(len(testnet.Nodes)) {
+		if !testnet.Nodes[idx].Stateless() {
+			targetNode = testnet.Nodes[idx]
+			break
 		}
-
-		break
 	}

 	if targetNode == nil {
@@ -55,15 +51,14 @@ func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error
 	}

 	// request the latest block and validator set from the node
-	blockRes, err := client.Block(context.Background(), nil)
+	blockRes, err := client.Block(ctx, nil)
 	if err != nil {
 		return err
 	}
-	evidenceHeight := blockRes.Block.Height
-	waitHeight := blockRes.Block.Height + 3
+	evidenceHeight := blockRes.Block.Height - 3

 	nValidators := 100
-	valRes, err := client.Validators(context.Background(), &evidenceHeight, nil, &nValidators)
+	valRes, err := client.Validators(ctx, &evidenceHeight, nil, &nValidators)
 	if err != nil {
 		return err
 	}
@@ -79,12 +74,8 @@ func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error
 		return err
 	}

-	wctx, cancel := context.WithTimeout(ctx, time.Minute)
-	defer cancel()
-
-	// wait for the node to reach the height above the forged height so that
-	// it is able to validate the evidence
-	_, err = waitForNode(wctx, targetNode, waitHeight)
+	// request the block at the evidence height
+	blockRes, err = client.Block(ctx, &evidenceHeight)
 	if err != nil {
 		return err
 	}
@@ -104,24 +95,28 @@ func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error
 			return err
 		}

-		_, err := client.BroadcastEvidence(context.Background(), ev)
+		_, err := client.BroadcastEvidence(ctx, ev)
 		if err != nil {
 			return err
 		}
 	}

-	wctx, cancel = context.WithTimeout(ctx, 30*time.Second)
+	logger.Info("Finished sending evidence",
+		"node", testnet.Name,
+		"amount", amount,
+		"height", evidenceHeight,
+	)
+
+	wctx, cancel := context.WithTimeout(ctx, time.Minute)
 	defer cancel()

-	// wait for the node to reach the height above the forged height so that
-	// it is able to validate the evidence
-	_, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2)
+	// wait for the node to make progress after submitting the evidence
+	// (the evidence height is 3 below the tip, plus one block of progress)
+	_, err = waitForNode(wctx, targetNode, evidenceHeight+4)
 	if err != nil {
 		return err
 	}

-	logger.Info(fmt.Sprintf("Finished sending evidence (height %d)", blockRes.Block.Height+2))
-
 	return nil
 }

@@ -237,7 +232,7 @@ func getRandomValidatorIndex(privVals []types.MockPV, vals *types.ValidatorSet)
 }

 func readPrivKey(keyFilePath string) (crypto.PrivKey, error) {
-	keyJSONBytes, err := ioutil.ReadFile(keyFilePath)
+	keyJSONBytes, err := os.ReadFile(keyFilePath)
 	if err != nil {
 		return nil, err
 	}
diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go
index 23050c636..f31b436dd 100644
--- a/test/e2e/runner/load.go
+++ b/test/e2e/runner/load.go
@@ -3,7 +3,6 @@ package main
 import (
 	"container/ring"
 	"context"
-	"errors"
 	"fmt"
 	"math/rand"
 	"time"
@@ -15,15 +14,15 @@ import (

 // Load generates transactions against the network until the given context is
 // canceled.
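+// The number of workers scales with the network size (two per node,
+// capped at 32): a ten-node network runs 20 workers, and networks of
+// 16 or more nodes run the full 32.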
-func Load(ctx context.Context, testnet *e2e.Testnet) error { +func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error { // Since transactions are executed across all nodes in the network, we need // to reduce transaction load for larger networks to avoid using too much // CPU. This gives high-throughput small networks and low-throughput large ones. // This also limits the number of TCP connections, since each worker has // a connection to all nodes. - concurrency := 64 / len(testnet.Nodes) - if concurrency == 0 { - concurrency = 1 + concurrency := len(testnet.Nodes) * 2 + if concurrency > 32 { + concurrency = 32 } chTx := make(chan types.Tx) @@ -32,10 +31,14 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { defer cancel() // Spawn job generator and processors. - logger.Info(fmt.Sprintf("Starting transaction load (%v workers)...", concurrency)) + logger.Info("starting transaction load", + "workers", concurrency, + "nodes", len(testnet.Nodes), + "tx", testnet.TxSize) + started := time.Now() - go loadGenerate(ctx, chTx, testnet.TxSize) + go loadGenerate(ctx, r, chTx, testnet.TxSize, len(testnet.Nodes)) for w := 0; w < concurrency; w++ { go loadProcess(ctx, testnet, chTx, chSuccess) @@ -54,19 +57,9 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { case numSeen := <-chSuccess: success += numSeen case <-ctx.Done(): - // if we couldn't submit any transactions, - // that's probably a problem and the test - // should error; however, for very short tests - // we shouldn't abort. - // - // The 2s cut off, is a rough guess based on - // the expected value of - // loadGenerateWaitTime. If the implementation - // of that function changes, then this might - // also need to change without more - // refactoring. - if success == 0 && time.Since(started) > 2*time.Second { - return errors.New("failed to submit any transactions") + if success == 0 { + return fmt.Errorf("failed to submit transactions in %s by %d workers", + time.Since(started), concurrency) } // TODO perhaps allow test networks to @@ -78,8 +71,8 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { logger.Info("ending transaction load", "dur_secs", time.Since(started).Seconds(), "txns", success, - "rate", rate, - "slow", rate < 1) + "workers", concurrency, + "rate", rate) return nil } @@ -92,7 +85,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { // generation is primarily the result of backpressure from the // broadcast transaction, though there is still some timer-based // limiting. -func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { +func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int64, networkSize int) { timer := time.NewTimer(0) defer timer.Stop() defer close(chTx) @@ -108,8 +101,8 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { // This gives a reasonable load without putting too much data in the app. id := rand.Int63() % 100 // nolint: gosec - bz := make([]byte, size) - _, err := rand.Read(bz) // nolint: gosec + bz := make([]byte, txSize) + _, err := r.Read(bz) if err != nil { panic(fmt.Sprintf("Failed to read random bytes: %v", err)) } @@ -121,22 +114,22 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { case chTx <- tx: // sleep for a bit before sending the // next transaction. 
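+				// The wait grows with network size: for a
+				// four-node network the generator pauses between
+				// 0.5s and 2s, a base jitter in [250ms, 1s] plus
+				// a size jitter in [250ms, 4*250ms].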
- timer.Reset(loadGenerateWaitTime(size)) + timer.Reset(loadGenerateWaitTime(r, networkSize)) } } } -func loadGenerateWaitTime(size int64) time.Duration { +func loadGenerateWaitTime(r *rand.Rand, size int) time.Duration { const ( - min = int64(100 * time.Millisecond) + min = int64(250 * time.Millisecond) max = int64(time.Second) ) var ( - baseJitter = rand.Int63n(max-min+1) + min // nolint: gosec - sizeFactor = size * int64(time.Millisecond) - sizeJitter = rand.Int63n(sizeFactor-min+1) + min // nolint: gosec + baseJitter = r.Int63n(max-min+1) + min + sizeFactor = int64(size) * min + sizeJitter = r.Int63n(sizeFactor-min+1) + min ) return time.Duration(baseJitter + sizeJitter) diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index f65b6d0b1..fb6ce4a8c 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -3,8 +3,10 @@ package main import ( "context" "fmt" + "math/rand" "os" "strconv" + "time" "github.com/spf13/cobra" @@ -12,9 +14,9 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -var ( - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) +const randomSeed = 2308084734268 + +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) func main() { NewCLI().Run() @@ -48,14 +50,26 @@ func NewCLI() *CLI { cli.testnet = testnet return nil }, - RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(cli.testnet); err != nil { + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err = Cleanup(cli.testnet); err != nil { return err } - if err := Setup(cli.testnet); err != nil { + defer func() { + if cli.preserve { + logger.Info("Preserving testnet contents because -preserve=true") + } else if err != nil { + logger.Info("Preserving testnet that encountered error", + "err", err) + } else if err := Cleanup(cli.testnet); err != nil { + logger.Error("Error cleaning up testnet contents", "err", err) + } + }() + if err = Setup(cli.testnet); err != nil { return err } + r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + chLoadResult := make(chan error) ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() @@ -63,51 +77,59 @@ func NewCLI() *CLI { lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - chLoadResult <- Load(lctx, cli.testnet) + chLoadResult <- Load(lctx, r, cli.testnet) }() - - if err := Start(ctx, cli.testnet); err != nil { + startAt := time.Now() + if err = Start(ctx, cli.testnet); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } if cli.testnet.HasPerturbations() { - if err := Perturb(ctx, cli.testnet); err != nil { + if err = Perturb(ctx, cli.testnet); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } } if cli.testnet.Evidence > 0 { - if err := InjectEvidence(ctx, cli.testnet, cli.testnet.Evidence); err != nil { + if err = InjectEvidence(ctx, r, cli.testnet, cli.testnet.Evidence); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress + if err = Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress return err } } + // to help make sure that we don't run into + // situations where 0 transactions have + // happened on quick cases, we make sure that + // it's 
been at least 15s before canceling the
+			// load generator.
+			//
+			// TODO allow the load generator to report
+			// successful transactions to avoid needing
+			// this sleep.
+			if rest := time.Since(startAt); rest < 15*time.Second {
+				time.Sleep(15*time.Second - rest)
+			}
+
 			loadCancel()
-			if err := <-chLoadResult; err != nil {
+			if err = <-chLoadResult; err != nil {
 				return fmt.Errorf("transaction load failed: %w", err)
 			}
-			if err := Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests
+			if err = Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests
 				return err
 			}
 			if err := Test(cli.testnet); err != nil {
 				return err
 			}
-			if !cli.preserve {
-				if err := Cleanup(cli.testnet); err != nil {
-					return err
-				}
-			}
 			return nil
 		},
 	}
@@ -193,7 +215,11 @@ func NewCLI() *CLI {
 		Use:   "load",
 		Short: "Generates transaction load until the command is canceled",
 		RunE: func(cmd *cobra.Command, args []string) (err error) {
-			return Load(context.Background(), cli.testnet)
+			return Load(
+				cmd.Context(),
+				rand.New(rand.NewSource(randomSeed)), // nolint: gosec
+				cli.testnet,
+			)
 		},
 	})
@@ -211,7 +237,12 @@ func NewCLI() *CLI {
 				}
 			}

-			return InjectEvidence(cmd.Context(), cli.testnet, amount)
+			return InjectEvidence(
+				cmd.Context(),
+				rand.New(rand.NewSource(randomSeed)), // nolint: gosec
+				cli.testnet,
+				amount,
+			)
 		},
 	})
@@ -269,6 +300,12 @@ Does not run any perturbations.
 			if err := Cleanup(cli.testnet); err != nil {
 				return err
 			}
+			defer func() {
+				if err := Cleanup(cli.testnet); err != nil {
+					logger.Error("Error cleaning up testnet contents", "err", err)
+				}
+			}()
+
 			if err := Setup(cli.testnet); err != nil {
 				return err
 			}
@@ -277,10 +314,12 @@ Does not run any perturbations.
 			ctx, cancel := context.WithCancel(cmd.Context())
 			defer cancel()

+			r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec
+
 			lctx, loadCancel := context.WithCancel(ctx)
 			defer loadCancel()
 			go func() {
-				err := Load(lctx, cli.testnet)
+				err := Load(lctx, r, cli.testnet)
 				chLoadResult <- err
 			}()
@@ -302,10 +341,6 @@ Does not run any perturbations.
return err } - if err := Cleanup(cli.testnet); err != nil { - return err - } - return nil }, }) diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index 900f75d73..ccb3f6c51 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index ca6b743eb..f6a32b114 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -7,7 +7,7 @@ import ( "time" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) @@ -70,9 +70,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients[node.Name] = client } - wctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - result, err := client.Status(wctx) + result, err := client.Status(ctx) if err != nil { continue } @@ -171,18 +169,18 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R return nil, err case err == nil && status.SyncInfo.LatestBlockHeight >= height: return status, nil - case counter%100 == 0: + case counter%500 == 0: switch { case err != nil: lastFailed = true logger.Error("node not yet ready", "iter", counter, "node", node.Name, - "err", err, "target", height, + "err", err, ) case status != nil: - logger.Error("node not yet ready", + logger.Info("node not yet ready", "iter", counter, "node", node.Name, "height", status.SyncInfo.LatestBlockHeight, diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 3af7a9944..cb9ddbc6d 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -51,7 +50,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) + err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) if err != nil { return err } @@ -84,13 +83,15 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - config.WriteConfigFile(nodeDir, cfg) // panics + if err := config.WriteConfigFile(nodeDir, cfg); err != nil { + return err + } appCfg, err := MakeAppConfig(node) if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) + err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err } @@ -237,8 +238,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) - cfg.P2P.AddrBookStrict = false - cfg.P2P.UseLegacy = node.UseLegacyP2P cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second @@ -292,16 +291,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { return nil, fmt.Errorf("unexpected mode %q", node.Mode) } - if node.Mempool != "" { - cfg.Mempool.Version = node.Mempool - } - - if node.BlockSync == "" { - cfg.BlockSync.Enable = false - } 
else { - cfg.BlockSync.Version = node.BlockSync - } - switch node.StateSync { case e2e.StateSyncP2P: cfg.StateSync.Enable = true @@ -355,7 +344,6 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "snapshot_interval": node.SnapshotInterval, "retain_blocks": node.RetainBlocks, "key_type": node.PrivvalKey.Type(), - "use_legacy_p2p": node.UseLegacyP2P, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -417,11 +405,11 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { // FIXME Apparently there's no function to simply load a config file without // involving the entire Viper apparatus, so we'll just resort to regexps. - bz, err := ioutil.ReadFile(cfgPath) + bz, err := os.ReadFile(cfgPath) if err != nil { return err } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) - return ioutil.WriteFile(cfgPath, bz, 0644) + return os.WriteFile(cfgPath, bz, 0644) } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 9a2b289b4..ab6f9739e 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -44,15 +44,19 @@ func TestApp_Hash(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - block, err := client.Block(ctx, nil) - require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), - "app hash does not match last block's app hash") - status, err := client.Status(ctx) require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, - "app hash does not match node status") + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) + require.NoError(t, err) + + if info.Response.LastBlockHeight == block.Block.Height { + require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), + "app hash does not match last block's app hash") + } + + require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, + "status out of sync with application") }) } diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index acc3ac78a..a645fd7c1 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 6e836ff78..8292e86ee 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -20,6 +20,14 @@ func TestValidator_Sets(t *testing.T) { require.NoError(t, err) first := status.SyncInfo.EarliestBlockHeight + + // for nodes that have to catch up, we should only + // check the validator sets for nodes after this + // point, to avoid inconsistencies with backfill. 
+ if node.StartAt > first { + first = node.StartAt + } + last := status.SyncInfo.LatestBlockHeight // skip first block if node is pruning blocks, to avoid race conditions diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile index 3d34e0a43..3bf4486b8 100644 --- a/test/fuzz/Makefile +++ b/test/fuzz/Makefile @@ -1,38 +1,15 @@ #!/usr/bin/make -f -.PHONY: fuzz-mempool-v1 -fuzz-mempool-v1: - cd mempool/v1 && \ +.PHONY: fuzz-mempool +fuzz-mempool: + cd mempool && \ rm -f *-fuzz.zip && \ go-fuzz-build && \ go-fuzz -.PHONY: fuzz-mempool-v0 -fuzz-mempool-v0: - cd mempool/v0 && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-addrbook -fuzz-p2p-addrbook: - cd p2p/addrbook && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-pex -fuzz-p2p-pex: - cd p2p/pex && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - .PHONY: fuzz-p2p-sc fuzz-p2p-sc: - cd p2p/secret_connection && \ + cd p2p/secretconnection && \ rm -f *-fuzz.zip && \ go run ./init-corpus/main.go && \ go-fuzz-build && \ diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go new file mode 100644 index 000000000..ba60d72cc --- /dev/null +++ b/test/fuzz/mempool/checktx.go @@ -0,0 +1,49 @@ +package mempool + +import ( + "context" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/libs/log" +) + +var mp *mempool.TxMempool +var getMp func() mempool.Mempool + +func init() { + app := kvstore.NewApplication() + cc := abciclient.NewLocalCreator(app) + appConnMem, _ := cc(log.NewNopLogger()) + err := appConnMem.Start(context.TODO()) + if err != nil { + panic(err) + } + + cfg := config.DefaultMempoolConfig() + cfg.Broadcast = false + + getMp = func() mempool.Mempool { + if mp == nil { + mp = mempool.NewTxMempool( + log.NewNopLogger(), + cfg, + appConnMem, + 0, + ) + + } + return mp + } +} + +func Fuzz(data []byte) int { + err := getMp().CheckTx(context.Background(), data, nil, mempool.TxInfo{}) + if err != nil { + return 0 + } + + return 1 +} diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/fuzz_test.go similarity index 78% rename from test/fuzz/mempool/v0/fuzz_test.go rename to test/fuzz/mempool/fuzz_test.go index 4f8f1e9c8..8af0326dd 100644 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ b/test/fuzz/mempool/fuzz_test.go @@ -1,13 +1,13 @@ -package v0_test +package mempool_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" - mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" + mempool "github.com/tendermint/tendermint/test/fuzz/mempool" ) const testdataCasesDir = "testdata/cases" @@ -25,9 +25,9 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) - mempoolv0.Fuzz(input) + mempool.Fuzz(input) }) } } diff --git a/test/fuzz/mempool/v0/testdata/cases/empty b/test/fuzz/mempool/testdata/cases/empty similarity index 100% rename from test/fuzz/mempool/v0/testdata/cases/empty rename to test/fuzz/mempool/testdata/cases/empty diff --git a/test/fuzz/mempool/v0/checktx.go b/test/fuzz/mempool/v0/checktx.go deleted file mode 100644 index a90ec2290..000000000 --- 
a/test/fuzz/mempool/v0/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v0 - -import ( - "context" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv0.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go deleted file mode 100644 index 6194f3bcb..000000000 --- a/test/fuzz/mempool/v1/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v1 - -import ( - "context" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go deleted file mode 100644 index 863697a0a..000000000 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1" -) - -const testdataCasesDir = "testdata/cases" - -func TestMempoolTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - mempoolv1.Fuzz(input) - }) - } -} diff --git a/test/fuzz/mempool/v1/testdata/cases/empty b/test/fuzz/mempool/v1/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go deleted file mode 100644 index 6d5548fc7..000000000 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ /dev/null @@ -1,35 +0,0 @@ -// nolint: gosec -package addrbook - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" -) - -var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) - -func Fuzz(data []byte) int { - addr := new(p2p.NetAddress) - if err := json.Unmarshal(data, addr); err != nil { - return -1 - } - - // 
Fuzz AddAddress. - err := addrBook.AddAddress(addr, addr) - if err != nil { - return 0 - } - - // Also, make sure PickAddress always returns a non-nil address. - bias := rand.Intn(100) - if p := addrBook.PickAddress(bias); p == nil { - panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", - bias, addrBook.Size())) - } - - return 1 -} diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go deleted file mode 100644 index 4ec7aebd9..000000000 --- a/test/fuzz/p2p/addrbook/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package addrbook_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" -) - -const testdataCasesDir = "testdata/cases" - -func TestAddrbookTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - addrbook.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go deleted file mode 100644 index 1166f9bd7..000000000 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ /dev/null @@ -1,59 +0,0 @@ -// nolint: gosec -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - // create "corpus" directory - corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - - // create corpus - privKey := ed25519.GenPrivKey() - addrs := []*p2p.NetAddress{ - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, - } - - for i, addr := range addrs { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) - - bz, err := json.Marshal(addr) - if err != nil { - log.Fatalf("can't marshal %v: %v", addr, err) - } - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", addr, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go deleted file mode 100644 index 8a194e730..000000000 --- a/test/fuzz/p2p/pex/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package pex_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" 
- - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/pex" -) - -const testdataCasesDir = "testdata/cases" - -func TestPexTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - pex.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go deleted file mode 100644 index e90216864..000000000 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ /dev/null @@ -1,84 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(rootDir string) { - log.SetFlags(0) - - corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - sizes := []int{0, 1, 2, 17, 5, 31} - - // Make the PRNG predictable - rand.Seed(10) - - for _, n := range sizes { - var addrs []*p2p.NetAddress - - // IPv4 addresses - for i := 0; i < n; i++ { - privKey := ed25519.GenPrivKey() - addr := fmt.Sprintf( - "%s@%v.%v.%v.%v:26656", - types.NodeIDFromPubKey(privKey.PubKey()), - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - ) - netAddr, _ := types.NewNetAddressString(addr) - addrs = append(addrs, netAddr) - } - - // IPv6 addresses - privKey := ed25519.GenPrivKey() - ipv6a, err := types.NewNetAddressString( - fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey()))) - if err != nil { - log.Fatalf("can't create a new netaddress: %v", err) - } - addrs = append(addrs, ipv6a) - - msg := tmp2p.PexMessage{ - Sum: &tmp2p.PexMessage_PexResponse{ - PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)}, - }, - } - bz, err := msg.Marshal() - if err != nil { - log.Fatalf("unable to marshal: %v", err) - } - - filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %X to %q: %v", bz, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go deleted file mode 100644 index 388361a4e..000000000 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ /dev/null @@ -1,95 +0,0 @@ -package pex - -import ( - "net" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var ( - pexR *pex.Reactor - peer p2p.Peer - logger = 
log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) - -func init() { - addrB := pex.NewAddrBook("./testdata/addrbook1", false) - pexR = pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) - pexR.SetLogger(logger) - peer = newFuzzPeer() - pexR.AddPeer(peer) - - cfg := config.DefaultP2PConfig() - cfg.PexReactor = true - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - return sw - }, logger) - pexR.SetSwitch(sw) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - pexR.Receive(pex.PexChannel, peer, data) - - if !peer.IsRunning() { - // do not increase priority for msgs which lead to peer being stopped - return 0 - } - - return 1 -} - -type fuzzPeer struct { - *service.BaseService - m map[string]interface{} -} - -var _ p2p.Peer = (*fuzzPeer)(nil) - -func newFuzzPeer() *fuzzPeer { - fp := &fuzzPeer{m: make(map[string]interface{})} - fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) - return fp -} - -var privKey = ed25519.GenPrivKey() -var nodeID = types.NodeIDFromPubKey(privKey.PubKey()) -var defaultNodeInfo = types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, - }, - NodeID: nodeID, - ListenAddr: "127.0.0.1:0", - Moniker: "foo1", -} - -func (fp *fuzzPeer) FlushStop() {} -func (fp *fuzzPeer) ID() types.NodeID { return nodeID } -func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) } -func (fp *fuzzPeer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""} -} -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { - return types.NewNetAddress(fp.ID(), fp.RemoteAddr()) -} -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 deleted file mode 100644 index acf3e721d..000000000 --- a/test/fuzz/p2p/pex/testdata/addrbook1 +++ /dev/null @@ -1,1705 +0,0 @@ -{ - "Key": "badd73ebd4eeafbaefc01e0c", - "Addrs": [ - { - "Addr": { - "IP": "233.174.138.192", - "Port": 48186 - }, - "Src": { - "IP": "198.37.90.115", - "Port": 29492 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692278-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 55 - ] - }, - { - "Addr": { - "IP": "181.28.96.104", - "Port": 26776 - }, - "Src": { - "IP": "183.12.35.241", - "Port": 26794 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692289-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "141.85.194.118", - "Port": 39768 - }, - "Src": { - "IP": "120.130.90.63", - "Port": 61750 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692383-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 185 - ] - }, - { - "Addr": { - "IP": "167.72.9.155", - "Port": 9542 - }, - "Src": { - "IP": "95.158.40.108", - 
"Port": 14929 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692604-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 250 - ] - }, - { - "Addr": { - "IP": "124.118.94.27", - "Port": 50333 - }, - "Src": { - "IP": "208.169.57.96", - "Port": 19754 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692046-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 0 - ] - }, - { - "Addr": { - "IP": "158.197.4.226", - "Port": 25979 - }, - "Src": { - "IP": "3.129.219.107", - "Port": 50374 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692211-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "170.42.135.37", - "Port": 34524 - }, - "Src": { - "IP": "73.125.53.212", - "Port": 49691 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692241-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 14 - ] - }, - { - "Addr": { - "IP": "234.69.254.147", - "Port": 31885 - }, - "Src": { - "IP": "167.106.61.34", - "Port": 22187 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692609-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 213 - ] - }, - { - "Addr": { - "IP": "32.176.173.90", - "Port": 17250 - }, - "Src": { - "IP": "118.91.243.12", - "Port": 26781 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692273-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 35 - ] - }, - { - "Addr": { - "IP": "162.154.114.145", - "Port": 13875 - }, - "Src": { - "IP": "198.178.108.166", - "Port": 59623 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692373-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 216 - ] - }, - { - "Addr": { - "IP": "67.128.167.93", - "Port": 50513 - }, - "Src": { - "IP": "104.93.115.28", - "Port": 48298 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692399-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "132.175.221.206", - "Port": 61037 - }, - "Src": { - "IP": "112.49.189.65", - "Port": 56186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692422-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 31 - ] - }, - { - "Addr": { - "IP": "155.49.24.238", - "Port": 26261 - }, - "Src": { - "IP": "97.10.121.246", - "Port": 8694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692473-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 23 - ] - }, - { - "Addr": { - "IP": "22.215.7.233", - "Port": 32487 - }, - "Src": { - "IP": "214.236.105.23", - "Port": 26870 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692572-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 255 - ] - }, - { - "Addr": { - "IP": "253.170.228.231", - "Port": 5002 - }, - "Src": { - "IP": "225.49.137.209", - "Port": 16908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692619-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 161 - ] - }, - { - "Addr": { - "IP": "162.126.204.39", - "Port": 62618 - }, - "Src": { - "IP": "250.214.168.131", - "Port": 3237 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69203-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 7 - ] - }, - { - "Addr": { - "IP": 
"83.154.228.215", - "Port": 23508 - }, - "Src": { - "IP": "66.33.77.170", - "Port": 52207 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692153-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "132.49.63.65", - "Port": 53651 - }, - "Src": { - "IP": "250.164.163.212", - "Port": 8612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692253-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 98 - ] - }, - { - "Addr": { - "IP": "200.168.34.12", - "Port": 61901 - }, - "Src": { - "IP": "133.185.186.115", - "Port": 14186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692488-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 214 - ] - }, - { - "Addr": { - "IP": "31.93.45.219", - "Port": 61036 - }, - "Src": { - "IP": "176.191.214.170", - "Port": 33402 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692024-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 68 - ] - }, - { - "Addr": { - "IP": "250.189.27.93", - "Port": 51665 - }, - "Src": { - "IP": "93.161.116.107", - "Port": 53482 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692196-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 42 - ] - }, - { - "Addr": { - "IP": "50.7.17.126", - "Port": 64300 - }, - "Src": { - "IP": "233.234.64.214", - "Port": 61061 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692444-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 13 - ] - }, - { - "Addr": { - "IP": "88.85.81.64", - "Port": 34834 - }, - "Src": { - "IP": "4.240.150.250", - "Port": 63064 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692248-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 195 - ] - }, - { - "Addr": { - "IP": "242.117.244.198", - "Port": 4363 - }, - "Src": { - "IP": "149.29.34.42", - "Port": 62567 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692263-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "245.155.175.114", - "Port": 37262 - }, - "Src": { - "IP": "75.85.36.49", - "Port": 7101 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692313-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "224.184.241.26", - "Port": 55870 - }, - "Src": { - "IP": "52.15.194.216", - "Port": 4733 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692327-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "43.178.26.188", - "Port": 55914 - }, - "Src": { - "IP": "103.250.250.35", - "Port": 1566 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692577-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "102.117.172.117", - "Port": 35855 - }, - "Src": { - "IP": "114.152.204.187", - "Port": 21156 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692158-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "39.33.41.199", - "Port": 51600 - }, - "Src": { - "IP": "119.65.88.38", - "Port": 41239 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692188-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - 
"BucketType": 1, - "Buckets": [ - 24 - ] - }, - { - "Addr": { - "IP": "63.164.56.227", - "Port": 1660 - }, - "Src": { - "IP": "169.54.47.92", - "Port": 2818 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692227-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 10 - ] - }, - { - "Addr": { - "IP": "50.183.223.115", - "Port": 26910 - }, - "Src": { - "IP": "115.98.199.4", - "Port": 8767 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692201-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "132.94.203.167", - "Port": 53156 - }, - "Src": { - "IP": "17.195.234.168", - "Port": 29405 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692294-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "135.194.230.212", - "Port": 14340 - }, - "Src": { - "IP": "160.2.241.10", - "Port": 36553 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692363-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 63 - ] - }, - { - "Addr": { - "IP": "116.53.200.25", - "Port": 27092 - }, - "Src": { - "IP": "219.104.163.247", - "Port": 50476 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692543-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "125.77.44.185", - "Port": 55291 - }, - "Src": { - "IP": "77.15.232.117", - "Port": 6934 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692589-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "27.221.35.172", - "Port": 26418 - }, - "Src": { - "IP": "252.18.49.70", - "Port": 9835 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692068-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "133.225.167.135", - "Port": 59468 - }, - "Src": { - "IP": "110.223.163.74", - "Port": 22576 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69213-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 164 - ] - }, - { - "Addr": { - "IP": "155.131.178.240", - "Port": 60476 - }, - "Src": { - "IP": "143.82.157.1", - "Port": 43821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692173-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - }, - { - "Addr": { - "IP": "207.13.48.52", - "Port": 28549 - }, - "Src": { - "IP": "238.224.177.29", - "Port": 44100 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692594-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 113 - ] - }, - { - "Addr": { - "IP": "91.137.2.184", - "Port": 44887 - }, - "Src": { - "IP": "72.131.70.84", - "Port": 29960 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692627-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "169.59.252.76", - "Port": 57711 - }, - "Src": { - "IP": "194.132.91.119", - "Port": 18037 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692478-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 124 - ] - }, - { - "Addr": { - "IP": "25.174.143.229", - "Port": 41540 - }, - "Src": { - "IP": "58.215.132.148", - "Port": 64950 - }, - "Attempts": 0, - "LastAttempt": 
"2017-11-07T01:11:34.692534-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 88 - ] - }, - { - "Addr": { - "IP": "71.239.78.239", - "Port": 46938 - }, - "Src": { - "IP": "156.98.186.169", - "Port": 32046 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692116-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 154 - ] - }, - { - "Addr": { - "IP": "94.137.107.61", - "Port": 20756 - }, - "Src": { - "IP": "101.201.138.179", - "Port": 22877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692414-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 233 - ] - }, - { - "Addr": { - "IP": "216.62.174.112", - "Port": 60162 - }, - "Src": { - "IP": "225.114.119.144", - "Port": 1575 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692464-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 132 - ] - }, - { - "Addr": { - "IP": "65.183.81.125", - "Port": 17511 - }, - "Src": { - "IP": "12.96.14.61", - "Port": 42308 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692308-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 153 - ] - }, - { - "Addr": { - "IP": "142.26.87.52", - "Port": 41967 - }, - "Src": { - "IP": "60.124.157.139", - "Port": 20727 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692321-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 29 - ] - }, - { - "Addr": { - "IP": "13.77.198.44", - "Port": 54508 - }, - "Src": { - "IP": "142.73.70.174", - "Port": 19525 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692553-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 170 - ] - }, - { - "Addr": { - "IP": "63.192.219.12", - "Port": 46603 - }, - "Src": { - "IP": "26.136.66.29", - "Port": 38924 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692558-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 203 - ] - }, - { - "Addr": { - "IP": "120.82.251.151", - "Port": 43723 - }, - "Src": { - "IP": "136.104.122.219", - "Port": 47452 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692599-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 103 - ] - }, - { - "Addr": { - "IP": "74.79.96.159", - "Port": 46646 - }, - "Src": { - "IP": "218.60.242.116", - "Port": 5361 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692145-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "194.65.211.174", - "Port": 43464 - }, - "Src": { - "IP": "87.5.112.153", - "Port": 56348 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692163-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "237.158.179.80", - "Port": 32231 - }, - "Src": { - "IP": "210.240.52.244", - "Port": 29142 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692183-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 27 - ] - }, - { - "Addr": { - "IP": "81.157.122.4", - "Port": 9917 - }, - "Src": { - "IP": "213.226.144.152", - "Port": 29950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692614-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 33 - ] - }, - { - "Addr": { - "IP": "180.147.73.220", - "Port": 367 - }, - "Src": { - "IP": 
"32.229.253.215", - "Port": 62165 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692529-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "83.110.235.17", - "Port": 33231 - }, - "Src": { - "IP": "230.54.162.85", - "Port": 51569 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692563-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 234 - ] - }, - { - "Addr": { - "IP": "100.252.20.2", - "Port": 1633 - }, - "Src": { - "IP": "52.136.47.198", - "Port": 31916 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692644-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 254 - ] - }, - { - "Addr": { - "IP": "74.5.247.79", - "Port": 18703 - }, - "Src": { - "IP": "200.247.68.128", - "Port": 55844 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692378-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 160 - ] - }, - { - "Addr": { - "IP": "17.220.231.87", - "Port": 59015 - }, - "Src": { - "IP": "54.207.49.4", - "Port": 17877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692404-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "156.194.57.127", - "Port": 18944 - }, - "Src": { - "IP": "154.94.235.84", - "Port": 61610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692439-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 32 - ] - }, - { - "Addr": { - "IP": "137.57.172.158", - "Port": 32031 - }, - "Src": { - "IP": "144.160.225.126", - "Port": 43225 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692568-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 4 - ] - }, - { - "Addr": { - "IP": "101.220.101.200", - "Port": 26480 - }, - "Src": { - "IP": "130.225.42.1", - "Port": 2522 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692637-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 173 - ] - }, - { - "Addr": { - "IP": "136.233.185.164", - "Port": 34011 - }, - "Src": { - "IP": "112.127.216.43", - "Port": 55317 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692649-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "101.189.107.148", - "Port": 28671 - }, - "Src": { - "IP": "213.55.140.235", - "Port": 2547 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692178-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 72 - ] - }, - { - "Addr": { - "IP": "61.190.60.64", - "Port": 58467 - }, - "Src": { - "IP": "206.86.120.31", - "Port": 54422 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692358-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 191 - ] - }, - { - "Addr": { - "IP": "227.51.127.223", - "Port": 52754 - }, - "Src": { - "IP": "124.24.12.47", - "Port": 59878 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692393-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 122 - ] - }, - { - "Addr": { - "IP": "101.19.152.238", - "Port": 47491 - }, - "Src": { - "IP": "211.30.216.184", - "Port": 17610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692135-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - 
"Addr": { - "IP": "182.198.35.238", - "Port": 15065 - }, - "Src": { - "IP": "239.67.104.149", - "Port": 43039 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692268-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 179 - ] - }, - { - "Addr": { - "IP": "233.12.68.51", - "Port": 47544 - }, - "Src": { - "IP": "203.224.119.48", - "Port": 23337 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692454-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 51 - ] - }, - { - "Addr": { - "IP": "181.30.35.80", - "Port": 500 - }, - "Src": { - "IP": "174.200.32.161", - "Port": 10174 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692503-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 69 - ] - }, - { - "Addr": { - "IP": "49.104.89.21", - "Port": 54774 - }, - "Src": { - "IP": "245.95.238.161", - "Port": 14339 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692654-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 120 - ] - }, - { - "Addr": { - "IP": "65.150.169.199", - "Port": 11589 - }, - "Src": { - "IP": "150.110.183.207", - "Port": 17694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692041-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 182 - ] - }, - { - "Addr": { - "IP": "84.203.198.48", - "Port": 47122 - }, - "Src": { - "IP": "141.209.147.221", - "Port": 26085 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692056-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "220.10.106.180", - "Port": 27439 - }, - "Src": { - "IP": "124.170.244.46", - "Port": 5249 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692125-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 166 - ] - }, - { - "Addr": { - "IP": "120.208.32.34", - "Port": 27224 - }, - "Src": { - "IP": "64.194.118.103", - "Port": 24388 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69251-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 149 - ] - }, - { - "Addr": { - "IP": "245.182.67.231", - "Port": 58067 - }, - "Src": { - "IP": "62.108.238.220", - "Port": 41851 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692522-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "50.81.160.105", - "Port": 8113 - }, - "Src": { - "IP": "129.187.68.121", - "Port": 58612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692284-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 94 - ] - }, - { - "Addr": { - "IP": "101.116.47.155", - "Port": 20287 - }, - "Src": { - "IP": "94.34.167.170", - "Port": 41821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692299-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 134 - ] - }, - { - "Addr": { - "IP": "159.253.213.86", - "Port": 5222 - }, - "Src": { - "IP": "124.47.162.125", - "Port": 45742 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692429-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 25 - ] - }, - { - "Addr": { - "IP": "124.72.81.213", - "Port": 35723 - }, - "Src": { - "IP": "201.65.186.55", - "Port": 26602 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692493-07:00", - "LastSuccess": 
"0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "77.216.197.130", - "Port": 49129 - }, - "Src": { - "IP": "245.160.14.27", - "Port": 38908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692517-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 58 - ] - }, - { - "Addr": { - "IP": "175.46.154.0", - "Port": 15297 - }, - "Src": { - "IP": "6.10.7.13", - "Port": 9657 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692583-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 89 - ] - }, - { - "Addr": { - "IP": "176.71.131.235", - "Port": 14342 - }, - "Src": { - "IP": "1.36.215.198", - "Port": 21709 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692206-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "34.211.134.186", - "Port": 31608 - }, - "Src": { - "IP": "187.87.12.183", - "Port": 32977 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692221-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 71 - ] - }, - { - "Addr": { - "IP": "238.63.227.107", - "Port": 49502 - }, - "Src": { - "IP": "185.51.127.143", - "Port": 22728 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692483-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 61 - ] - }, - { - "Addr": { - "IP": "160.65.76.45", - "Port": 27307 - }, - "Src": { - "IP": "170.175.198.16", - "Port": 44759 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692051-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 36 - ] - }, - { - "Addr": { - "IP": "152.22.79.90", - "Port": 25861 - }, - "Src": { - "IP": "216.183.31.190", - "Port": 9185 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692409-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 163 - ] - }, - { - "Addr": { - "IP": "200.2.175.37", - "Port": 57270 - }, - "Src": { - "IP": "108.20.254.94", - "Port": 32812 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692434-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "111.16.237.10", - "Port": 45200 - }, - "Src": { - "IP": "215.82.246.115", - "Port": 42333 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692469-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "166.217.195.221", - "Port": 4579 - }, - "Src": { - "IP": "148.153.131.183", - "Port": 13848 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692498-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 78 - ] - }, - { - "Addr": { - "IP": "1.226.156.147", - "Port": 61660 - }, - "Src": { - "IP": "169.138.16.69", - "Port": 23455 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692548-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "108.209.27.58", - "Port": 59102 - }, - "Src": { - "IP": "140.27.139.90", - "Port": 52154 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692014-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 53 - ] - }, - { - "Addr": { - "IP": "221.244.202.95", - "Port": 5032 - }, - "Src": { - "IP": "230.152.141.80", - "Port": 19457 - }, - "Attempts": 0, - 
"LastAttempt": "2017-11-07T01:11:34.692168-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "55.87.1.138", - "Port": 39686 - }, - "Src": { - "IP": "55.22.167.132", - "Port": 35663 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692258-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "209.53.148.74", - "Port": 18502 - }, - "Src": { - "IP": "195.108.121.25", - "Port": 16730 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692304-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 180 - ] - }, - { - "Addr": { - "IP": "21.66.206.236", - "Port": 10771 - }, - "Src": { - "IP": "236.195.50.16", - "Port": 30697 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692368-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 22 - ] - }, - { - "Addr": { - "IP": "190.87.236.91", - "Port": 58378 - }, - "Src": { - "IP": "72.224.218.34", - "Port": 44817 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692459-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 127 - ] - }, - { - "Addr": { - "IP": "197.172.79.170", - "Port": 24958 - }, - "Src": { - "IP": "71.22.4.12", - "Port": 28558 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692036-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "160.176.234.94", - "Port": 47013 - }, - "Src": { - "IP": "212.172.24.59", - "Port": 29594 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692062-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 99 - ] - }, - { - "Addr": { - "IP": "170.206.180.18", - "Port": 26212 - }, - "Src": { - "IP": "228.135.62.18", - "Port": 26164 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692234-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - } - ] -} diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/test/fuzz/p2p/pex/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go index d48dc4267..1f3757aa0 100644 --- a/test/fuzz/p2p/secretconnection/fuzz_test.go +++ b/test/fuzz/p2p/secretconnection/fuzz_test.go @@ -1,7 +1,7 @@ package secretconnection_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestSecretConnectionTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) secretconnection.Fuzz(input) }) diff --git a/test/fuzz/p2p/secretconnection/init-corpus/main.go b/test/fuzz/p2p/secretconnection/init-corpus/main.go index 635f2d99f..3a2537ff7 100644 --- a/test/fuzz/p2p/secretconnection/init-corpus/main.go +++ b/test/fuzz/p2p/secretconnection/init-corpus/main.go @@ -4,7 +4,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -39,7 +38,7 @@ func initCorpus(baseDir string) { for i, datum := range data { filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) - if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { + if err := os.WriteFile(filename, []byte(datum), 0644); err != nil { log.Fatalf("can't write %v to %q: %v", 
diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
index 50b9194fe..41911e725 100644
--- a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
+++ b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go
@@ -1,7 +1,7 @@
 package server_test
 
 import (
-	"io/ioutil"
+	"io"
 	"os"
 	"path/filepath"
 	"testing"
@@ -25,7 +25,7 @@ func TestServerTestdataCases(t *testing.T) {
 			}()
 			f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
 			require.NoError(t, err)
-			input, err := ioutil.ReadAll(f)
+			input, err := io.ReadAll(f)
 			require.NoError(t, err)
 			server.Fuzz(input)
 		})
diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go
index 08f7e2b6b..723c0e030 100644
--- a/test/fuzz/rpc/jsonrpc/server/handler.go
+++ b/test/fuzz/rpc/jsonrpc/server/handler.go
@@ -3,13 +3,13 @@ package server
 import (
 	"bytes"
 	"encoding/json"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/http/httptest"
 
 	"github.com/tendermint/tendermint/libs/log"
 	rs "github.com/tendermint/tendermint/rpc/jsonrpc/server"
-	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+	"github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 var rpcFuncMap = map[string]*rs.RPCFunc{
@@ -32,7 +32,7 @@ func Fuzz(data []byte) int {
 	rec := httptest.NewRecorder()
 	mux.ServeHTTP(rec, req)
 	res := rec.Result()
-	blob, err := ioutil.ReadAll(res.Body)
+	blob, err := io.ReadAll(res.Body)
 	if err != nil {
 		panic(err)
 	}
diff --git a/test/test_cover.sh b/test/test_cover.sh
index 17df139e6..cad6bec6d 100644
--- a/test/test_cover.sh
+++ b/test/test_cover.sh
@@ -6,7 +6,7 @@ set -e
 echo "mode: atomic" > coverage.txt
 for pkg in ${PKGS[@]}; do
-	go test -timeout 5m -race -coverprofile=profile.out -covermode=atomic "$pkg"
+	go test -timeout 5m -race -coverprofile=profile.out "$pkg"
 	if [ -f profile.out ]; then
 		tail -n +2 profile.out >> coverage.txt;
 		rm profile.out
diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile
deleted file mode 100644
index 500822690..000000000
--- a/tools/proto/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM bufbuild/buf:latest as buf
-
-FROM golang:1.14-alpine3.11 as builder
-
-RUN apk add --update --no-cache build-base curl git upx && \
-	rm -rf /var/cache/apk/*
-
-ENV GOLANG_PROTOBUF_VERSION=1.3.1 \
-	GOGO_PROTOBUF_VERSION=1.3.2
-
-RUN GO111MODULE=on go get \
-	github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \
-	github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} \
-	github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \
-	mv /go/bin/protoc-gen-go* /usr/local/bin/
-
-
-FROM alpine:edge
-
-WORKDIR /work
-
-RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \
-	apk add --update --no-cache clang && \
-	rm -rf /var/cache/apk/*
-
-COPY --from=builder /usr/local/bin /usr/local/bin
-COPY --from=buf /usr/local/bin /usr/local/bin
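The handler.go hunk above keeps the existing fuzz shape: drive the mux in-process with httptest and read the recorded response body, now via io.ReadAll. A condensed, hypothetical sketch of that pattern (a stand-in echo handler, not the patch's actual rpcFuncMap wiring):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in handler; the real code registers JSON-RPC routes on the mux.
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		fmt.Fprintf(w, "echo: %s", body)
	})

	req, err := http.NewRequest("POST", "/", bytes.NewReader([]byte(`{"id":1}`)))
	if err != nil {
		panic(err)
	}
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, req) // no network: serve the request in-process

	res := rec.Result()
	defer res.Body.Close()
	blob, err := io.ReadAll(res.Body) // was ioutil.ReadAll before this patch
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}
```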
diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go
index 4d333949a..c8b5cd81d 100644
--- a/tools/tm-signer-harness/internal/test_harness.go
+++ b/tools/tm-signer-harness/internal/test_harness.go
@@ -12,8 +12,8 @@ import (
 	"github.com/tendermint/tendermint/crypto/tmhash"
 
 	"github.com/tendermint/tendermint/crypto/ed25519"
+	"github.com/tendermint/tendermint/internal/state"
 	"github.com/tendermint/tendermint/privval"
-	"github.com/tendermint/tendermint/state"
 
 	"github.com/tendermint/tendermint/libs/log"
 	tmnet "github.com/tendermint/tendermint/libs/net"
@@ -89,7 +89,7 @@ type timeoutError interface {
 // NewTestHarness will load Tendermint data from the given files (including
 // validator public/private keypairs and chain details) and create a new
 // harness.
-func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) {
+func NewTestHarness(ctx context.Context, logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) {
 	keyFile := ExpandPath(cfg.KeyFile)
 	stateFile := ExpandPath(cfg.StateFile)
 	logger.Info("Loading private validator configuration", "keyFile", keyFile, "stateFile", stateFile)
@@ -113,7 +113,7 @@ func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, err
 		return nil, newTestHarnessError(ErrFailedToCreateListener, err, "")
 	}
 
-	signerClient, err := privval.NewSignerClient(spv, st.ChainID)
+	signerClient, err := privval.NewSignerClient(ctx, spv, st.ChainID)
 	if err != nil {
 		return nil, newTestHarnessError(ErrFailedToCreateListener, err, "")
 	}
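The test_harness.go change is one instance of a pattern repeated below: constructors such as NewTestHarness and privval.NewSignerClient now accept the caller's context instead of managing their own lifetime. A minimal sketch of the idea, with hypothetical names (Client, NewClient) standing in for the harness types:

```go
package main

import (
	"context"
	"fmt"
)

// Client is a hypothetical stand-in for a component, like SignerClient,
// whose constructor now takes the caller's context.
type Client struct{ ctx context.Context }

func NewClient(ctx context.Context) (*Client, error) {
	// The constructor can respect cancellation even during setup.
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return &Client{ctx: ctx}, nil
}

// Run blocks until the caller cancels the context.
func (c *Client) Run() {
	<-c.ctx.Done()
	fmt.Println("shutting down:", c.ctx.Err())
}

func main() {
	// The caller owns the lifetime: one cancel tears everything down.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := NewClient(ctx)
	if err != nil {
		panic(err)
	}
	cancel()
	c.Run()
}
```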
diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go
index cf22bc836..2ef630555 100644
--- a/tools/tm-signer-harness/internal/test_harness_test.go
+++ b/tools/tm-signer-harness/internal/test_harness_test.go
@@ -1,8 +1,8 @@
 package internal
 
 import (
+	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -74,17 +74,24 @@ const (
 )
 
 func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	cfg := makeConfig(t, 1, 2)
 	defer cleanup(cfg)
 
-	th, err := NewTestHarness(log.TestingLogger(), cfg)
+	th, err := NewTestHarness(ctx, log.TestingLogger(), cfg)
 	require.NoError(t, err)
 	th.Run()
 	assert.Equal(t, ErrMaxAcceptRetriesReached, th.exitCode)
 }
 
 func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	harnessTest(
+		ctx,
 		t,
 		func(th *TestHarness) *privval.SignerServer {
 			return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false)
@@ -94,7 +101,11 @@ func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) {
 }
 
 func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	harnessTest(
+		ctx,
 		t,
 		func(th *TestHarness) *privval.SignerServer {
 			return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false)
@@ -104,7 +115,11 @@ func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) {
 }
 
 func TestRemoteSignerProposalSigningFailed(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	harnessTest(
+		ctx,
 		t,
 		func(th *TestHarness) *privval.SignerServer {
 			return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false)
@@ -114,7 +129,11 @@ func TestRemoteSignerProposalSigningFailed(t *testing.T) {
 }
 
 func TestRemoteSignerVoteSigningFailed(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	harnessTest(
+		ctx,
 		t,
 		func(th *TestHarness) *privval.SignerServer {
 			return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true)
@@ -145,11 +164,16 @@ func newMockSignerServer(
 }
 
 // For running relatively standard tests.
-func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) {
+func harnessTest(
+	ctx context.Context,
+	t *testing.T,
+	signerServerMaker func(th *TestHarness) *privval.SignerServer,
+	expectedExitCode int,
+) {
 	cfg := makeConfig(t, 100, 3)
 	defer cleanup(cfg)
 
-	th, err := NewTestHarness(log.TestingLogger(), cfg)
+	th, err := NewTestHarness(ctx, log.TestingLogger(), cfg)
 	require.NoError(t, err)
 	donec := make(chan struct{})
 	go func() {
@@ -158,7 +182,7 @@ func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.
 	}()
 
 	ss := signerServerMaker(th)
-	require.NoError(t, ss.Start())
+	require.NoError(t, ss.Start(ctx))
 	assert.True(t, ss.IsRunning())
 	defer ss.Stop() //nolint:errcheck // ignore for tests
@@ -187,7 +211,7 @@ func cleanup(cfg TestHarnessConfig) {
 }
 
 func makeTempFile(name, content string) string {
-	tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name))
+	tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-*", name))
 	if err != nil {
 		panic(err)
 	}
diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go
index a6d1312a1..4bf1933e0 100644
--- a/tools/tm-signer-harness/main.go
+++ b/tools/tm-signer-harness/main.go
@@ -1,9 +1,9 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"time"
@@ -116,7 +116,7 @@ Usage:
 	}
 }
 
-func runTestHarness(acceptRetries int, bindAddr, tmhome string) {
+func runTestHarness(ctx context.Context, acceptRetries int, bindAddr, tmhome string) {
 	tmhome = internal.ExpandPath(tmhome)
 	cfg := internal.TestHarnessConfig{
 		BindAddr:         bindAddr,
@@ -129,7 +129,7 @@ func runTestHarness(acceptRetries int, bindAddr, tmhome string) {
 		SecretConnKey:    ed25519.GenPrivKey(),
 		ExitWhenComplete: true,
 	}
-	harness, err := internal.NewTestHarness(logger, cfg)
+	harness, err := internal.NewTestHarness(ctx, logger, cfg)
 	if err != nil {
 		logger.Error(err.Error())
 		if therr, ok := err.(*internal.TestHarnessError); ok {
@@ -149,7 +149,7 @@ func extractKey(tmhome, outputPath string) {
 		os.Exit(1)
 	}
 	pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey))
-	if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil {
+	if err := os.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil {
 		logger.Info("Failed to write private key", "output", outputPath, "err", err)
 		os.Exit(1)
 	}
@@ -157,6 +157,9 @@ func extractKey(tmhome, outputPath string) {
 }
 
 func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	if err := rootCmd.Parse(os.Args[1:]); err != nil {
 		fmt.Printf("Error parsing flags: %v\n", err)
 		os.Exit(1)
@@ -184,7 +187,7 @@ func main() {
 			fmt.Printf("Error parsing flags: %v\n", err)
 			os.Exit(1)
 		}
-		runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome)
+		runTestHarness(ctx, flagAcceptRetries, flagBindAddr, flagTMHome)
 	case "extract_key":
 		if err := extractKeyCmd.Parse(os.Args[2:]); err != nil {
 			fmt.Printf("Error parsing flags: %v\n", err)
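Likewise, ss.Start() becoming ss.Start(ctx) suggests services now tie their background work to the caller's context. A toy sketch of that lifecycle, assuming only that cancellation should stop the service (the Service type here is illustrative, not the privval.SignerServer API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Service is a toy stand-in for a component like a signer server.
type Service struct{ done chan struct{} }

// Start launches background work that exits when ctx is canceled.
func (s *Service) Start(ctx context.Context) error {
	s.done = make(chan struct{})
	go func() {
		defer close(s.done)
		<-ctx.Done()
	}()
	return nil
}

// Wait blocks until the background work has finished.
func (s *Service) Wait() { <-s.done }

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var s Service
	if err := s.Start(ctx); err != nil {
		panic(err)
	}

	time.Sleep(10 * time.Millisecond)
	cancel() // caller-driven shutdown; no separate Stop call needed
	s.Wait()
	fmt.Println("stopped")
}
```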
diff --git a/types/block_test.go b/types/block_test.go
index 1c762653b..e0c1ab3be 100644
--- a/types/block_test.go
+++ b/types/block_test.go
@@ -555,6 +555,9 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) {
 		round  = 0
 	)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	type commitVoteTest struct {
 		blockIDs []BlockID
 		numVotes []int // must sum to numValidators
@@ -572,7 +575,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) {
 		vi := int32(0)
 		for n := range tc.blockIDs {
 			for i := 0; i < tc.numVotes[n]; i++ {
-				pubKey, err := vals[vi].GetPubKey(context.Background())
+				pubKey, err := vals[vi].GetPubKey(ctx)
 				require.NoError(t, err)
 				vote := &Vote{
 					ValidatorAddress: pubKey.Address(),
diff --git a/types/event_bus.go b/types/event_bus.go
deleted file mode 100644
index 8cfe1ea54..000000000
--- a/types/event_bus.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package types
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/libs/log"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	"github.com/tendermint/tendermint/libs/service"
-)
-
-const defaultCapacity = 0
-
-type EventBusSubscriber interface {
-	Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error)
-	Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error
-	UnsubscribeAll(ctx context.Context, subscriber string) error
-
-	NumClients() int
-	NumClientSubscriptions(clientID string) int
-}
-
-type Subscription interface {
-	ID() string
-	Out() <-chan tmpubsub.Message
-	Canceled() <-chan struct{}
-	Err() error
-}
-
-// EventBus is a common bus for all events going through the system. All calls
-// are proxied to underlying pubsub server. All events must be published using
-// EventBus to ensure correct data types.
-type EventBus struct {
-	service.BaseService
-	pubsub *tmpubsub.Server
-}
-
-// NewEventBus returns a new event bus.
-func NewEventBus() *EventBus {
-	return NewEventBusWithBufferCapacity(defaultCapacity)
-}
-
-// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity.
-func NewEventBusWithBufferCapacity(cap int) *EventBus {
-	// capacity could be exposed later if needed
-	pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap))
-	b := &EventBus{pubsub: pubsub}
-	b.BaseService = *service.NewBaseService(nil, "EventBus", b)
-	return b
-}
-
-func (b *EventBus) SetLogger(l log.Logger) {
-	b.BaseService.SetLogger(l)
-	b.pubsub.SetLogger(l.With("module", "pubsub"))
-}
-
-func (b *EventBus) OnStart() error {
-	return b.pubsub.Start()
-}
-
-func (b *EventBus) OnStop() {
-	if err := b.pubsub.Stop(); err != nil {
-		b.pubsub.Logger.Error("error trying to stop eventBus", "error", err)
-	}
-}
-
-func (b *EventBus) NumClients() int {
-	return b.pubsub.NumClients()
-}
-
-func (b *EventBus) NumClientSubscriptions(clientID string) int {
-	return b.pubsub.NumClientSubscriptions(clientID)
-}
-
-func (b *EventBus) Subscribe(
-	ctx context.Context,
-	subscriber string,
-	query tmpubsub.Query,
-	outCapacity ...int,
-) (Subscription, error) {
-	return b.pubsub.Subscribe(ctx, subscriber, query, outCapacity...)
-}
-
-// This method can be used for a local consensus explorer and synchronous
-// testing. Do not use for public facing / untrusted subscriptions!
-func (b *EventBus) SubscribeUnbuffered(
-	ctx context.Context,
-	subscriber string,
-	query tmpubsub.Query,
-) (Subscription, error) {
-	return b.pubsub.SubscribeUnbuffered(ctx, subscriber, query)
-}
-
-func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error {
-	return b.pubsub.Unsubscribe(ctx, args)
-}
-
-func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error {
-	return b.pubsub.UnsubscribeAll(ctx, subscriber)
-}
-
-func (b *EventBus) Publish(eventValue string, eventData TMEventData) error {
-	// no explicit deadline for publishing events
-	ctx := context.Background()
-
-	tokens := strings.Split(EventTypeKey, ".")
-	event := types.Event{
-		Type: tokens[0],
-		Attributes: []types.EventAttribute{
-			{
-				Key:   tokens[1],
-				Value: eventValue,
-			},
-		},
-	}
-
-	return b.pubsub.PublishWithEvents(ctx, eventData, []types.Event{event})
-}
-
-func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error {
-	// no explicit deadline for publishing events
-	ctx := context.Background()
-	events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...)
-
-	// add Tendermint-reserved new block event
-	events = append(events, EventNewBlock)
-
-	return b.pubsub.PublishWithEvents(ctx, data, events)
-}
-
-func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error {
-	// no explicit deadline for publishing events
-	ctx := context.Background()
-	events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...)
-
-	// add Tendermint-reserved new block header event
-	events = append(events, EventNewBlockHeader)
-
-	return b.pubsub.PublishWithEvents(ctx, data, events)
-}
-
-func (b *EventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error {
-	return b.Publish(EventNewEvidenceValue, evidence)
-}
-
-func (b *EventBus) PublishEventVote(data EventDataVote) error {
-	return b.Publish(EventVoteValue, data)
-}
-
-func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error {
-	return b.Publish(EventValidBlockValue, data)
-}
-
-func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error {
-	return b.Publish(EventBlockSyncStatusValue, data)
-}
-
-func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error {
-	return b.Publish(EventStateSyncStatusValue, data)
-}
-
-// PublishEventTx publishes tx event with events from Result. Note it will add
-// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys
-// will be overwritten.
-func (b *EventBus) PublishEventTx(data EventDataTx) error {
-	// no explicit deadline for publishing events
-	ctx := context.Background()
-	events := data.Result.Events
-
-	// add Tendermint-reserved events
-	events = append(events, EventTx)
-
-	tokens := strings.Split(TxHashKey, ".")
-	events = append(events, types.Event{
-		Type: tokens[0],
-		Attributes: []types.EventAttribute{
-			{
-				Key:   tokens[1],
-				Value: fmt.Sprintf("%X", Tx(data.Tx).Hash()),
-			},
-		},
-	})
-
-	tokens = strings.Split(TxHeightKey, ".")
-	events = append(events, types.Event{
-		Type: tokens[0],
-		Attributes: []types.EventAttribute{
-			{
-				Key:   tokens[1],
-				Value: fmt.Sprintf("%d", data.Height),
-			},
-		},
-	})
-
-	return b.pubsub.PublishWithEvents(ctx, data, events)
-}
-
-func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error {
-	return b.Publish(EventNewRoundStepValue, data)
-}
-
-func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error {
-	return b.Publish(EventTimeoutProposeValue, data)
-}
-
-func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error {
-	return b.Publish(EventTimeoutWaitValue, data)
-}
-
-func (b *EventBus) PublishEventNewRound(data EventDataNewRound) error {
-	return b.Publish(EventNewRoundValue, data)
-}
-
-func (b *EventBus) PublishEventCompleteProposal(data EventDataCompleteProposal) error {
-	return b.Publish(EventCompleteProposalValue, data)
-}
-
-func (b *EventBus) PublishEventPolka(data EventDataRoundState) error {
-	return b.Publish(EventPolkaValue, data)
-}
-
-func (b *EventBus) PublishEventRelock(data EventDataRoundState) error {
-	return b.Publish(EventRelockValue, data)
-}
-
-func (b *EventBus) PublishEventLock(data EventDataRoundState) error {
-	return b.Publish(EventLockValue, data)
-}
-
-func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error {
-	return b.Publish(EventValidatorSetUpdatesValue, data)
-}
-
-//-----------------------------------------------------------------------------
-type NopEventBus struct{}
-
-func (NopEventBus) Subscribe(
-	ctx context.Context,
-	subscriber string,
-	query tmpubsub.Query,
-	out chan<- interface{},
-) error {
-	return nil
-}
-
-func (NopEventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error {
-	return nil
-}
-
-func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventVote(data EventDataVote) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTx(data EventDataTx) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventPolka(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventRelock(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventLock(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error {
-	return nil
-}
diff --git a/types/event_bus_test.go b/types/event_bus_test.go
deleted file mode 100644
index ba3827e40..000000000
--- a/types/event_bus_test.go
+++ /dev/null
@@ -1,507 +0,0 @@
-package types
-
-import (
-	"context"
-	"fmt"
-	mrand "math/rand"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	abci "github.com/tendermint/tendermint/abci/types"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
-)
-
-func TestEventBusPublishEventTx(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	tx := Tx("foo")
-	result := abci.ResponseDeliverTx{
-		Data: []byte("bar"),
-		Events: []abci.Event{
-			{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
-		},
-	}
-
-	// PublishEventTx adds 3 composite keys, so the query below should work
-	query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash())
-	txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query))
-	require.NoError(t, err)
-
-	done := make(chan struct{})
-	go func() {
-		msg := <-txsSub.Out()
-		edt := msg.Data().(EventDataTx)
-		assert.Equal(t, int64(1), edt.Height)
-		assert.Equal(t, uint32(0), edt.Index)
-		assert.EqualValues(t, tx, edt.Tx)
-		assert.Equal(t, result, edt.Result)
-		close(done)
-	}()
-
-	err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{
-		Height: 1,
-		Index:  0,
-		Tx:     tx,
-		Result: result,
-	}})
-	assert.NoError(t, err)
-
-	select {
-	case <-done:
-	case <-time.After(1 * time.Second):
-		t.Fatal("did not receive a transaction after 1 sec.")
-	}
-}
-
-func TestEventBusPublishEventNewBlock(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
-	blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()}
-	resultBeginBlock := abci.ResponseBeginBlock{
-		Events: []abci.Event{
-			{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
-		},
-	}
-	resultEndBlock := abci.ResponseEndBlock{
-		Events: []abci.Event{
-			{Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}},
-		},
-	}
-
-	// PublishEventNewBlock adds the tm.event compositeKey, so the query below should work
-	query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2"
-	blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query))
-	require.NoError(t, err)
-
-	done := make(chan struct{})
-	go func() {
-		msg := <-blocksSub.Out()
-		edt := msg.Data().(EventDataNewBlock)
-		assert.Equal(t, block, edt.Block)
-		assert.Equal(t, blockID, edt.BlockID)
-		assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock)
-		assert.Equal(t, resultEndBlock, edt.ResultEndBlock)
-		close(done)
-	}()
-
-	err = eventBus.PublishEventNewBlock(EventDataNewBlock{
-		Block:            block,
-		BlockID:          blockID,
-		ResultBeginBlock: resultBeginBlock,
-		ResultEndBlock:   resultEndBlock,
-	})
-	assert.NoError(t, err)
-
-	select {
-	case <-done:
-	case <-time.After(1 * time.Second):
-		t.Fatal("did not receive a block after 1 sec.")
-	}
-}
-
-func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	tx := Tx("foo")
-	result := abci.ResponseDeliverTx{
-		Data: []byte("bar"),
-		Events: []abci.Event{
-			{
-				Type: "transfer",
-				Attributes: []abci.EventAttribute{
-					{Key: "sender", Value: "foo"},
-					{Key: "recipient", Value: "bar"},
-					{Key: "amount", Value: "5"},
-				},
-			},
-			{
-				Type: "transfer",
-				Attributes: []abci.EventAttribute{
-					{Key: "sender", Value: "baz"},
-					{Key: "recipient", Value: "cat"},
-					{Key: "amount", Value: "13"},
-				},
-			},
-			{
-				Type: "withdraw.rewards",
-				Attributes: []abci.EventAttribute{
-					{Key: "address", Value: "bar"},
-					{Key: "source", Value: "iceman"},
-					{Key: "amount", Value: "33"},
-				},
-			},
-		},
-	}
-
-	testCases := []struct {
-		query         string
-		expectResults bool
-	}{
-		{
-			"tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'",
-			false,
-		},
-		{
-			"tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'",
-			true,
-		},
-		{
-			"tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'",
-			true,
-		},
-		{
-			"tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'",
-			true,
-		},
-		{
-			"tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'",
-			false,
-		},
-	}
-
-	for i, tc := range testCases {
-		sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustParse(tc.query))
-		require.NoError(t, err)
-
-		done := make(chan struct{})
-
-		go func() {
-			select {
-			case msg := <-sub.Out():
-				data := msg.Data().(EventDataTx)
-				assert.Equal(t, int64(1), data.Height)
-				assert.Equal(t, uint32(0), data.Index)
-				assert.EqualValues(t, tx, data.Tx)
-				assert.Equal(t, result, data.Result)
-				close(done)
-			case <-time.After(1 * time.Second):
-				return
-			}
-		}()
-
-		err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{
-			Height: 1,
-			Index:  0,
-			Tx:     tx,
-			Result: result,
-		}})
-		assert.NoError(t, err)
-
-		select {
-		case <-done:
-			if !tc.expectResults {
-				require.Fail(t, "unexpected transaction result(s) from subscription")
-			}
-		case <-time.After(1 * time.Second):
-			if tc.expectResults {
-				require.Fail(t, "failed to receive a transaction after 1 second")
-			}
-		}
-	}
-}
-
-func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
-	resultBeginBlock := abci.ResponseBeginBlock{
-		Events: []abci.Event{
-			{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
-		},
-	}
-	resultEndBlock := abci.ResponseEndBlock{
-		Events: []abci.Event{
-			{Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}},
-		},
-	}
-
-	// PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work
-	query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2"
-	headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query))
-	require.NoError(t, err)
-
-	done := make(chan struct{})
-	go func() {
-		msg := <-headersSub.Out()
-		edt := msg.Data().(EventDataNewBlockHeader)
-		assert.Equal(t, block.Header, edt.Header)
-		assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock)
-		assert.Equal(t, resultEndBlock, edt.ResultEndBlock)
-		close(done)
-	}()
-
-	err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{
-		Header:           block.Header,
-		ResultBeginBlock: resultBeginBlock,
-		ResultEndBlock:   resultEndBlock,
-	})
-	assert.NoError(t, err)
-
-	select {
-	case <-done:
-	case <-time.After(1 * time.Second):
-		t.Fatal("did not receive a block header after 1 sec.")
-	}
-}
-
-func TestEventBusPublishEventNewEvidence(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id")
-
-	query := "tm.event='NewEvidence'"
-	evSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query))
-	require.NoError(t, err)
-
-	done := make(chan struct{})
-	go func() {
-		msg := <-evSub.Out()
-		edt := msg.Data().(EventDataNewEvidence)
-		assert.Equal(t, ev, edt.Evidence)
-		assert.Equal(t, int64(4), edt.Height)
-		close(done)
-	}()
-
-	err = eventBus.PublishEventNewEvidence(EventDataNewEvidence{
-		Evidence: ev,
-		Height:   4,
-	})
-	assert.NoError(t, err)
-
-	select {
-	case <-done:
-	case <-time.After(1 * time.Second):
-		t.Fatal("did not receive a block header after 1 sec.")
-	}
-}
-
-func TestEventBusPublish(t *testing.T) {
-	eventBus := NewEventBus()
-	err := eventBus.Start()
-	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-
-	const numEventsExpected = 14
-
-	sub, err := eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, numEventsExpected)
-	require.NoError(t, err)
-
-	done := make(chan struct{})
-	go func() {
-		numEvents := 0
-		for range sub.Out() {
-			numEvents++
-			if numEvents >= numEventsExpected {
-				close(done)
-				return
-			}
-		}
-	}()
-
-	err = eventBus.Publish(EventNewBlockHeaderValue, EventDataNewBlockHeader{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventNewBlock(EventDataNewBlock{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventVote(EventDataVote{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventNewRoundStep(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventTimeoutWait(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventNewRound(EventDataNewRound{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventCompleteProposal(EventDataCompleteProposal{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventPolka(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventRelock(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventLock(EventDataRoundState{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{})
-	require.NoError(t, err)
-	err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{})
-	require.NoError(t, err)
-
-	select {
-	case <-done:
-	case <-time.After(1 * time.Second):
-		t.Fatalf("expected to receive %d events after 1 sec.", numEventsExpected)
-	}
-}
-
-func BenchmarkEventBus(b *testing.B) {
-	benchmarks := []struct {
-		name        string
-		numClients  int
-		randQueries bool
-		randEvents  bool
-	}{
-		{"10Clients1Query1Event", 10, false, false},
-		{"100Clients", 100, false, false},
-		{"1000Clients", 1000, false, false},
-
-		{"10ClientsRandQueries1Event", 10, true, false},
-		{"100Clients", 100, true, false},
-		{"1000Clients", 1000, true, false},
-
-		{"10ClientsRandQueriesRandEvents", 10, true, true},
-		{"100Clients", 100, true, true},
-		{"1000Clients", 1000, true, true},
-
-		{"10Clients1QueryRandEvents", 10, false, true},
-		{"100Clients", 100, false, true},
-		{"1000Clients", 1000, false, true},
-	}
-
-	for _, bm := range benchmarks {
-		bm := bm
-		b.Run(bm.name, func(b *testing.B) {
-			benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b)
-		})
-	}
-}
-
-func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) {
-	// for random* functions
-	mrand.Seed(time.Now().Unix())
-
-	eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache
-	err := eventBus.Start()
-	if err != nil {
-		b.Error(err)
-	}
-	b.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			b.Error(err)
-		}
-	})
-
-	ctx := context.Background()
-	q := EventQueryNewBlock
-
-	for i := 0; i < numClients; i++ {
-		if randQueries {
-			q = randQuery()
-		}
-		sub, err := eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q)
-		if err != nil {
-			b.Fatal(err)
-		}
-		go func() {
-			for {
-				select {
-				case <-sub.Out():
-				case <-sub.Canceled():
-					return
-				}
-			}
-		}()
-	}
-
-	eventValue := EventNewBlockValue
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		if randEvents {
-			eventValue = randEventValue()
-		}
-
-		err := eventBus.Publish(eventValue, EventDataString("Gamora"))
-		if err != nil {
-			b.Error(err)
-		}
-	}
-}
-
-var events = []string{
-	EventNewBlockValue,
-	EventNewBlockHeaderValue,
-	EventNewRoundValue,
-	EventNewRoundStepValue,
-	EventTimeoutProposeValue,
-	EventCompleteProposalValue,
-	EventPolkaValue,
-	EventLockValue,
-	EventRelockValue,
-	EventTimeoutWaitValue,
-	EventVoteValue,
-	EventBlockSyncStatusValue,
-	EventStateSyncStatusValue,
-}
-
-func randEventValue() string {
-	return events[mrand.Intn(len(events))]
-}
-
-var queries = []tmpubsub.Query{
-	EventQueryNewBlock,
-	EventQueryNewBlockHeader,
-	EventQueryNewRound,
-	EventQueryNewRoundStep,
-	EventQueryTimeoutPropose,
-	EventQueryCompleteProposal,
-	EventQueryPolka,
-	EventQueryLock,
-	EventQueryRelock,
-	EventQueryTimeoutWait,
-	EventQueryVote,
-	EventQueryBlockSyncStatus,
-	EventQueryStateSyncStatus,
-}
-
-func randQuery() tmpubsub.Query {
-	return queries[mrand.Intn(len(queries))]
-}
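The deleted benchmark constructs the bus with NewEventBusWithBufferCapacity(0), i.e. unbuffered subscriptions, so each publish rendezvouses with its subscribers rather than landing in a buffer. A self-contained toy showing the channel-level behavior that choice opts into (plain channels only, no Tendermint types):

```go
package main

import "fmt"

func main() {
	// Capacity 0: a send blocks until a receiver is ready, which is the
	// delivery behavior an unbuffered subscription opts into.
	events := make(chan string)
	done := make(chan struct{})

	go func() {
		for ev := range events {
			fmt.Println("subscriber got:", ev)
		}
		close(done)
	}()

	events <- "NewBlock" // returns only once the subscriber has received it
	events <- "Vote"
	close(events)
	<-done
}
```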
diff --git a/types/genesis.go b/types/genesis.go
index 47580d5f7..a4b3904ab 100644
--- a/types/genesis.go
+++ b/types/genesis.go
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"time"
 
 	"github.com/tendermint/tendermint/crypto"
@@ -51,7 +51,7 @@ func (genDoc *GenesisDoc) SaveAs(file string) error {
 		return err
 	}
 
-	return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec
+	return os.WriteFile(file, genDocBytes, 0644) // nolint:gosec
 }
 
 // ValidatorHash returns the hash of the validator set contained in the GenesisDoc
@@ -125,7 +125,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
 
 // GenesisDocFromFile reads JSON data from a file and unmarshals it into a GenesisDoc.
 func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
-	jsonBlob, err := ioutil.ReadFile(genDocFile)
+	jsonBlob, err := os.ReadFile(genDocFile)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err)
 	}
diff --git a/types/genesis_test.go b/types/genesis_test.go
index 7fb3088dd..422c2125f 100644
--- a/types/genesis_test.go
+++ b/types/genesis_test.go
@@ -1,7 +1,6 @@
 package types
 
 import (
-	"io/ioutil"
 	"os"
 	"testing"
 
@@ -122,7 +121,7 @@ func TestGenesisGood(t *testing.T) {
 }
 
 func TestGenesisSaveAs(t *testing.T) {
-	tmpfile, err := ioutil.TempFile("", "genesis")
+	tmpfile, err := os.CreateTemp("", "genesis")
 	require.NoError(t, err)
 	defer os.Remove(tmpfile.Name())
diff --git a/types/mempool.go b/types/mempool.go
index c739796af..fa0f8a208 100644
--- a/types/mempool.go
+++ b/types/mempool.go
@@ -1,14 +1,16 @@
 package types
 
 import (
+	"crypto/sha256"
 	"errors"
 	"fmt"
 )
 
-var (
-	// ErrTxInCache is returned to the client if we saw tx earlier
-	ErrTxInCache = errors.New("tx already exists in cache")
-)
+// ErrTxInCache is returned to the client if we saw tx earlier
+var ErrTxInCache = errors.New("tx already exists in cache")
+
+// TxKey is the fixed length array key used as an index.
+type TxKey [sha256.Size]byte
 
 // ErrTxTooLarge defines an error when a transaction is too big to be sent in a
 // message to other peers.
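The new TxKey type gives mempool code a fixed-length, comparable index. A short sketch of how such a key can be derived and used as a map key; the txKey helper below is illustrative rather than the mempool's actual accessor, though sha256.Sum256 over the raw bytes is the natural derivation given the type's size:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// TxKey mirrors the type added above: a fixed-length array, so values are
// comparable and usable directly as map keys.
type TxKey [sha256.Size]byte

// txKey derives the key from raw transaction bytes (illustrative helper).
func txKey(tx []byte) TxKey {
	return sha256.Sum256(tx)
}

func main() {
	seen := map[TxKey]struct{}{}

	k := txKey([]byte("some transaction"))
	if _, ok := seen[k]; ok {
		fmt.Println("tx already exists in cache") // cf. ErrTxInCache
	} else {
		seen[k] = struct{}{}
		fmt.Printf("tx cached under key %x\n", k)
	}
}
```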
diff --git a/types/netaddress.go b/types/netaddress.go
deleted file mode 100644
index bc074dca6..000000000
--- a/types/netaddress.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Modified for Tendermint
-// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
-// https://github.com/conformal/btcd/blob/master/LICENSE
-
-package types
-
-import (
-	"errors"
-	"flag"
-	"fmt"
-	"net"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// EmptyNetAddress defines the string representation of an empty NetAddress
-const EmptyNetAddress = ""
-
-// NetAddress defines information about a peer on the network
-// including its ID, IP address, and port.
-type NetAddress struct {
-	ID   NodeID `json:"id"`
-	IP   net.IP `json:"ip"`
-	Port uint16 `json:"port"`
-}
-
-// NewNetAddress returns a new NetAddress using the provided TCP
-// address. When testing, other net.Addr (except TCP) will result in
-// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will
-// panic. Panics if ID is invalid.
-// TODO: socks proxies?
-func NewNetAddress(id NodeID, addr net.Addr) *NetAddress {
-	tcpAddr, ok := addr.(*net.TCPAddr)
-	if !ok {
-		if flag.Lookup("test.v") == nil { // normal run
-			panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
-		} else { // in testing
-			netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0)
-			netAddr.ID = id
-			return netAddr
-		}
-	}
-
-	if err := id.Validate(); err != nil {
-		panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
-	}
-
-	ip := tcpAddr.IP
-	port := uint16(tcpAddr.Port)
-	na := NewNetAddressIPPort(ip, port)
-	na.ID = id
-	return na
-}
-
-// NewNetAddressIPPort returns a new NetAddress using the provided IP
-// and port number.
-func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
-	return &NetAddress{
-		IP:   ip,
-		Port: port,
-	}
-}
-
-// NewNetAddressString returns a new NetAddress using the provided address in
-// the form of "ID@IP:Port".
-// Also resolves the host if host is not an IP.
-// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
-func NewNetAddressString(addr string) (*NetAddress, error) {
-	addrWithoutProtocol := removeProtocolIfDefined(addr)
-	spl := strings.Split(addrWithoutProtocol, "@")
-	if len(spl) != 2 {
-		return nil, ErrNetAddressNoID{addr}
-	}
-
-	id, err := NewNodeID(spl[0])
-	if err != nil {
-		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-	}
-
-	if err := id.Validate(); err != nil {
-		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-	}
-
-	addrWithoutProtocol = spl[1]
-
-	// get host and port
-	host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
-	if err != nil {
-		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-	}
-	if len(host) == 0 {
-		return nil, ErrNetAddressInvalid{
-			addrWithoutProtocol,
-			errors.New("host is empty")}
-	}
-
-	ip := net.ParseIP(host)
-	if ip == nil {
-		ips, err := net.LookupIP(host)
-		if err != nil {
-			return nil, ErrNetAddressLookup{host, err}
-		}
-		ip = ips[0]
-	}
-
-	port, err := strconv.ParseUint(portStr, 10, 16)
-	if err != nil {
-		return nil, ErrNetAddressInvalid{portStr, err}
-	}
-
-	na := NewNetAddressIPPort(ip, uint16(port))
-	na.ID = id
-	return na, nil
-}
-
-// Equals reports whether na and other are the same addresses,
-// including their ID, IP, and Port.
-func (na *NetAddress) Equals(other interface{}) bool {
-	if o, ok := other.(*NetAddress); ok {
-		return na.String() == o.String()
-	}
-	return false
-}
-
-// Same returns true is na has the same non-empty ID or DialString as other.
-func (na *NetAddress) Same(other interface{}) bool {
-	if o, ok := other.(*NetAddress); ok {
-		if na.DialString() == o.DialString() {
-			return true
-		}
-		if na.ID != "" && na.ID == o.ID {
-			return true
-		}
-	}
-	return false
-}
-
-// String representation: <ID>@<IP>:<PORT>
-func (na *NetAddress) String() string {
-	if na == nil {
-		return EmptyNetAddress
-	}
-
-	addrStr := na.DialString()
-	if na.ID != "" {
-		addrStr = na.ID.AddressString(addrStr)
-	}
-
-	return addrStr
-}
-
-func (na *NetAddress) DialString() string {
-	if na == nil {
-		return "<nil-NetAddress>"
-	}
-	return net.JoinHostPort(
-		na.IP.String(),
-		strconv.FormatUint(uint64(na.Port), 10),
-	)
-}
-
-// Dial calls net.Dial on the address.
-func (na *NetAddress) Dial() (net.Conn, error) {
-	conn, err := net.Dial("tcp", na.DialString())
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
-}
-
-// DialTimeout calls net.DialTimeout on the address.
-func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
-	conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
-}
-
-// Routable returns true if the address is routable.
-func (na *NetAddress) Routable() bool {
-	if err := na.Valid(); err != nil {
-		return false
-	}
-	// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
-	return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
-		na.RFC4193() || na.RFC4843() || na.Local())
-}
-
-// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero
-// address or one that matches the RFC3849 documentation address format.
-func (na *NetAddress) Valid() error {
-	if err := na.ID.Validate(); err != nil {
-		return fmt.Errorf("invalid ID: %w", err)
-	}
-
-	if na.IP == nil {
-		return errors.New("no IP")
-	}
-	if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
-		return errors.New("invalid IP")
-	}
-	return nil
-}
-
-// Local returns true if it is a local address.
-func (na *NetAddress) Local() bool {
-	return na.IP.IsLoopback() || zero4.Contains(na.IP)
-}
-
-// ReachabilityTo checks whenever o can be reached from na.
-func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
-	const (
-		Unreachable = 0
-		Default     = iota
-		Teredo
-		Ipv6Weak
-		Ipv4
-		Ipv6Strong
-	)
-	switch {
-	case !na.Routable():
-		return Unreachable
-	case na.RFC4380():
-		switch {
-		case !o.Routable():
-			return Default
-		case o.RFC4380():
-			return Teredo
-		case o.IP.To4() != nil:
-			return Ipv4
-		default: // ipv6
-			return Ipv6Weak
-		}
-	case na.IP.To4() != nil:
-		if o.Routable() && o.IP.To4() != nil {
-			return Ipv4
-		}
-		return Default
-	default: /* ipv6 */
-		var tunneled bool
-		// Is our v6 is tunneled?
-		if o.RFC3964() || o.RFC6052() || o.RFC6145() {
-			tunneled = true
-		}
-		switch {
-		case !o.Routable():
-			return Default
-		case o.RFC4380():
-			return Teredo
-		case o.IP.To4() != nil:
-			return Ipv4
-		case tunneled:
-			// only prioritize ipv6 if we aren't tunneling it.
-			return Ipv6Weak
-		}
-		return Ipv6Strong
-	}
-}
-
-// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
-// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
-// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
-// RFC3964: IPv6 6to4 (2002::/16)
-// RFC4193: IPv6 unique local (FC00::/7)
-// RFC4380: IPv6 Teredo tunneling (2001::/32)
-// RFC4843: IPv6 ORCHID: (2001:10::/28)
-// RFC4862: IPv6 Autoconfig (FE80::/64)
-// RFC6052: IPv6 well known prefix (64:FF9B::/96)
-// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
-var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
-var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
-var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
-var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
-var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
-var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
-var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
-var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
-var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
-var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
-var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
-var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
-var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
-var (
-	// onionCatNet defines the IPv6 address block used to support Tor.
-	// bitcoind encodes a .onion address as a 16 byte number by decoding the
-	// address prior to the .onion (i.e. the key hash) base32 into a ten
-	// byte number. It then stores the first 6 bytes of the address as
-	// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
-	//
-	// This is the same range used by OnionCat, which is part part of the
-	// RFC4193 unique local IPv6 range.
-	//
-	// In summary the format is:
-	// { magic 6 bytes, 10 bytes base32 decode of key hash }
-	onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
-)
-
-func (na *NetAddress) RFC1918() bool {
-	return rfc1918_10.Contains(na.IP) ||
-		rfc1918_192.Contains(na.IP) ||
-		rfc1918_172.Contains(na.IP)
-}
-func (na *NetAddress) RFC3849() bool     { return rfc3849.Contains(na.IP) }
-func (na *NetAddress) RFC3927() bool     { return rfc3927.Contains(na.IP) }
-func (na *NetAddress) RFC3964() bool     { return rfc3964.Contains(na.IP) }
-func (na *NetAddress) RFC4193() bool     { return rfc4193.Contains(na.IP) }
-func (na *NetAddress) RFC4380() bool     { return rfc4380.Contains(na.IP) }
-func (na *NetAddress) RFC4843() bool     { return rfc4843.Contains(na.IP) }
-func (na *NetAddress) RFC4862() bool     { return rfc4862.Contains(na.IP) }
-func (na *NetAddress) RFC6052() bool     { return rfc6052.Contains(na.IP) }
-func (na *NetAddress) RFC6145() bool     { return rfc6145.Contains(na.IP) }
-func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) }
-
-func removeProtocolIfDefined(addr string) string {
-	if strings.Contains(addr, "://") {
-		return strings.Split(addr, "://")[1]
-	}
-	return addr
-
-}
-
-// ipNet returns a net.IPNet struct given the passed IP address string, number
-// of one bits to include at the start of the mask, and the total number of bits
-// for the mask.
-func ipNet(ip string, ones, bits int) net.IPNet {
-	return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
-}
diff --git a/types/netaddress_test.go b/types/netaddress_test.go
deleted file mode 100644
index 393d70e0b..000000000
--- a/types/netaddress_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package types
-
-import (
-	"net"
-	"sync"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestNetAddress_String(t *testing.T) {
-	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
-	require.Nil(t, err)
-
-	netAddr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr)
-
-	var wg sync.WaitGroup
-
-	for i := 0; i < 10; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			_ = netAddr.String()
-		}()
-	}
-
-	wg.Wait()
-
-	s := netAddr.String()
-	require.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", s)
-}
-
-func TestNewNetAddress(t *testing.T) {
-	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
-	require.Nil(t, err)
-
-	assert.Panics(t, func() {
-		NewNetAddress("", tcpAddr)
-	})
-
-	addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr)
-	assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String())
-
-	assert.NotPanics(t, func() {
-		NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
-	}, "Calling NewNetAddress with UDPAddr should not panic in testing")
-}
-
-func TestNewNetAddressString(t *testing.T) {
-	testCases := []struct {
-		name     string
-		addr     string
-		expected string
-		correct  bool
-	}{
-		{"no node id and no protocol", "127.0.0.1:8080", "", false},
-		{"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false},
-		{"no node id w/ udp input", "udp://127.0.0.1:8080", "", false},
-
-		{
-			"no protocol",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			true,
-		},
-		{
-			"tcp input",
-			"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			true,
-		},
-		{
-			"udp input",
-			"udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			true,
-		},
-		{"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
-		{"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},

-		// {"127.0.0:8080", false},
-		{"invalid host", "notahost", "", false},
-		{"invalid port", "127.0.0.1:notapath", "", false},
-		{"invalid host w/ port", "notahost:8080", "", false},
-		{"just a port", "8082", "", false},
-		{"non-existent port", "127.0.0:8080000", "", false},
-
-		{"too short nodeId", "deadbeef@127.0.0.1:8080", "", false},
-		{"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false},
-		{"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
-
-		{"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false},
-		{"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false},
-		{"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
-		{
-			"correct nodeId w/tcp",
-			"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			true,
-		},
-
-		{"no node id", "tcp://@127.0.0.1:8080", "", false},
-		{"no node id or IP", "tcp://@", "", false},
-		{"tcp no host, w/ port", "tcp://:26656", "", false},
-		{"empty", "", "", false},
-		{"node id delimiter 1", "@", "", false},
-		{"node id delimiter 2", " @", "", false},
-		{"node id delimiter 3", " @ ", "", false},
-	}
-
-	for _, tc := range testCases {
-		tc := tc
-		t.Run(tc.name, func(t *testing.T) {
-			addr, err := NewNetAddressString(tc.addr)
-			if tc.correct {
-				if assert.Nil(t, err, tc.addr) {
-					assert.Equal(t, tc.expected, addr.String())
-				}
-			} else {
-				assert.NotNil(t, err, tc.addr)
-			}
-		})
-	}
-}
-
-func TestNewNetAddressIPPort(t *testing.T) {
-	addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080)
-	assert.Equal(t, "127.0.0.1:8080", addr.String())
-}
-
-func TestNetAddressProperties(t *testing.T) {
-	// TODO add more test cases
-	testCases := []struct {
-		addr     string
-		valid    bool
-		local    bool
-		routable bool
-	}{
-		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false},
-		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true},
-	}
-
-	for _, tc := range testCases {
-		addr, err := NewNetAddressString(tc.addr)
-		require.Nil(t, err)
-
-		err = addr.Valid()
-		if tc.valid {
-			assert.NoError(t, err)
-		} else {
-			assert.Error(t, err)
-		}
-		assert.Equal(t, tc.local, addr.Local())
-		assert.Equal(t, tc.routable, addr.Routable())
-	}
-}
-
-func TestNetAddressReachabilityTo(t *testing.T) {
-	// TODO add more test cases
-	testCases := []struct {
-		addr         string
-		other        string
-		reachability int
-	}{
-		{
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
-			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081",
-			0,
-		},
-		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1},
-	}
-
-	for _, tc := range testCases {
-		addr, err := NewNetAddressString(tc.addr)
-		require.Nil(t, err)
-
-		other, err := NewNetAddressString(tc.other)
-		require.Nil(t, err)
-
-		assert.Equal(t, tc.reachability, addr.ReachabilityTo(other))
-	}
-}
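types/netaddress.go and types/netaddress_test.go are deleted outright. The one capability the types package still needs from them, parsing and validating an "id@host:port" string, is reintroduced below as ParseAddressString in types/node_info.go, and the table-driven cases from the deleted TestNewNetAddressString carry over nearly verbatim to TestParseAddressString in types/node_info_test.go.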
"udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - // {"127.0.0:8080", false}, - {"invalid host", "notahost", "", false}, - {"invalid port", "127.0.0.1:notapath", "", false}, - {"invalid host w/ port", "notahost:8080", "", false}, - {"just a port", "8082", "", false}, - {"non-existent port", "127.0.0:8080000", "", false}, - - {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, - {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, - {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, - {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - { - "correct nodeId w/tcp", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - - {"no node id", "tcp://@127.0.0.1:8080", "", false}, - {"no node id or IP", "tcp://@", "", false}, - {"tcp no host, w/ port", "tcp://:26656", "", false}, - {"empty", "", "", false}, - {"node id delimiter 1", "@", "", false}, - {"node id delimiter 2", " @", "", false}, - {"node id delimiter 3", " @ ", "", false}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - }) - } -} - -func TestNewNetAddressIPPort(t *testing.T) { - addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) - assert.Equal(t, "127.0.0.1:8080", addr.String()) -} - -func TestNetAddressProperties(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - valid bool - local bool - routable bool - }{ - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - err = addr.Valid() - if tc.valid { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tc.local, addr.Local()) - assert.Equal(t, tc.routable, addr.Routable()) - } -} - -func TestNetAddressReachabilityTo(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - other string - reachability int - }{ - { - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", - 0, - }, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - other, err := NewNetAddressString(tc.other) - require.Nil(t, err) - - assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) - } -} diff --git a/types/node_id.go b/types/node_id.go index c260aa117..a5db40159 100644 --- a/types/node_id.go +++ b/types/node_id.go @@ -31,8 +31,7 @@ func NewNodeID(nodeID string) (NodeID, error) { 
// IDAddressString returns id@hostPort. It strips the leading // protocol from protocolHostPort if it exists. func (id NodeID) AddressString(protocolHostPort string) string { - hostPort := removeProtocolIfDefined(protocolHostPort) - return fmt.Sprintf("%s@%s", id, hostPort) + return fmt.Sprintf("%s@%s", id, removeProtocolIfDefined(protocolHostPort)) } // NodeIDFromPubKey creates a node ID from a given PubKey address. diff --git a/types/node_info.go b/types/node_info.go index 9dbdbf70d..902ca759b 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -3,6 +3,9 @@ package types import ( "errors" "fmt" + "net" + "strconv" + "strings" "github.com/tendermint/tendermint/libs/bytes" tmstrings "github.com/tendermint/tendermint/libs/strings" @@ -74,17 +77,10 @@ func (info NodeInfo) ID() NodeID { // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). func (info NodeInfo) Validate() error { - - // ID is already validated. - - // Validate ListenAddr. - _, err := NewNetAddressString(info.ID().AddressString(info.ListenAddr)) - if err != nil { + if _, _, err := ParseAddressString(info.ID().AddressString(info.ListenAddr)); err != nil { return err } - // Network is validated in CompatibleWith. - // Validate Version if len(info.Version) > 0 && (!tmstrings.IsASCIIText(info.Version) || tmstrings.ASCIITrim(info.Version) == "") { @@ -163,15 +159,6 @@ OUTER_LOOP: return nil } -// NetAddress returns a NetAddress derived from the NodeInfo - -// it includes the authenticated peer ID and the self-reported -// ListenAddr. Note that the ListenAddr is not authenticated and -// may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() (*NetAddress, error) { - idAddr := info.ID().AddressString(info.ListenAddr) - return NewNetAddressString(idAddr) -} - // AddChannel is used by the router when a channel is opened to add it to the node info func (info *NodeInfo) AddChannel(channel uint16) { // check that the channel doesn't already exist @@ -244,3 +231,58 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) { return dni, nil } + +// ParseAddressString reads an address string, and returns the IP +// address and port information, returning an error for any validation +// errors. 
+func ParseAddressString(addr string) (net.IP, uint16, error) { + addrWithoutProtocol := removeProtocolIfDefined(addr) + spl := strings.Split(addrWithoutProtocol, "@") + if len(spl) != 2 { + return nil, 0, errors.New("invalid address") + } + + id, err := NewNodeID(spl[0]) + if err != nil { + return nil, 0, err + } + + if err := id.Validate(); err != nil { + return nil, 0, err + } + + addrWithoutProtocol = spl[1] + + // get host and port + host, portStr, err := net.SplitHostPort(addrWithoutProtocol) + if err != nil { + return nil, 0, err + } + if len(host) == 0 { + return nil, 0, err + } + + ip := net.ParseIP(host) + if ip == nil { + ips, err := net.LookupIP(host) + if err != nil { + return nil, 0, err + } + ip = ips[0] + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} + +func removeProtocolIfDefined(addr string) string { + if strings.Contains(addr, "://") { + return strings.Split(addr, "://")[1] + } + return addr + +} diff --git a/types/node_info_test.go b/types/node_info_test.go index 812cec184..ff30aa30a 100644 --- a/types/node_info_test.go +++ b/types/node_info_test.go @@ -173,3 +173,80 @@ func TestNodeInfoAddChannel(t *testing.T) { nodeInfo.AddChannel(2) require.Contains(t, nodeInfo.Channels, byte(0x02)) } + +func TestParseAddressString(t *testing.T) { + testCases := []struct { + name string + addr string + expected string + correct bool + }{ + {"no node id and no protocol", "127.0.0.1:8080", "", false}, + {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, + {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, + + { + "no protocol", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "tcp input", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "udp input", + "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + // {"127.0.0:8080", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + { + "correct nodeId w/tcp", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + + {"no node id", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + 
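A minimal usage sketch for the new helper, assuming code inside the types package (the address literal is illustrative; the accepted format is the same "id@host:port" string, with an optional protocol prefix, that NewNetAddressString handled):

    // dialString resolves a peer address string to a host:port suitable
    // for net.Dial, using the ParseAddressString helper added above.
    func dialString(addr string) (string, error) {
    	ip, port, err := ParseAddressString(addr)
    	if err != nil {
    		return "", err // bad node ID, host, or port
    	}
    	return net.JoinHostPort(ip.String(), strconv.Itoa(int(port))), nil
    }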
{"node id delimiter 3", " @ ", "", false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + addr, port, err := ParseAddressString(tc.addr) + if tc.correct { + require.Nil(t, err, tc.addr) + assert.Contains(t, tc.expected, addr.String()) + assert.Contains(t, tc.expected, fmt.Sprint(port)) + } else { + assert.Error(t, err, "%v", tc.addr) + } + }) + } +} diff --git a/types/node_key.go b/types/node_key.go index 547fa1696..aecbd8a21 100644 --- a/types/node_key.go +++ b/types/node_key.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -33,7 +33,7 @@ func (nodeKey NodeKey) SaveAs(filePath string) error { if err != nil { return err } - return ioutil.WriteFile(filePath, jsonBytes, 0600) + return os.WriteFile(filePath, jsonBytes, 0600) } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If @@ -67,7 +67,7 @@ func GenNodeKey() NodeKey { // LoadNodeKey loads NodeKey located in filePath. func LoadNodeKey(filePath string) (NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) + jsonBytes, err := os.ReadFile(filePath) if err != nil { return NodeKey{}, err } diff --git a/types/part_set_test.go b/types/part_set_test.go index c6ea0f452..2dfe12263 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/assert" @@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) { // Reconstruct data, assert that they are equal. data2Reader := partSet2.GetReader() - data2, err := ioutil.ReadAll(data2Reader) + data2, err := io.ReadAll(data2Reader) require.NoError(t, err) assert.Equal(t, data, data2) diff --git a/types/protobuf.go b/types/protobuf.go index 7cd224665..f82965fbf 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -2,7 +2,7 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" ) //------------------------------------------------------- @@ -22,7 +22,7 @@ func (tm2pb) Validator(val *Validator) abci.Validator { // XXX: panics on unknown pubkey type func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { - pk, err := cryptoenc.PubKeyToProto(val.PubKey) + pk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } @@ -52,7 +52,7 @@ type pb2tm struct{} func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { - pub, err := cryptoenc.PubKeyFromProto(v.PubKey) + pub, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { return nil, err } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index b6900f40c..a33d031e2 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -9,7 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" ) func TestABCIPubKey(t *testing.T) { @@ -19,9 +19,9 @@ func TestABCIPubKey(t *testing.T) { } func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { - abciPubKey, err := cryptoenc.PubKeyToProto(pk) + abciPubKey, err := encoding.PubKeyToProto(pk) require.NoError(t, err) - pk2, err := 
diff --git a/types/validation.go b/types/validation.go
index 1bf0265db..e8f53f2a0 100644
--- a/types/validation.go
+++ b/types/validation.go
@@ -162,9 +162,9 @@ func verifyCommitBatch(
 	var (
 		val                *Validator
 		valIdx             int32
-		seenVals                 = make(map[int32]int, len(commit.Signatures))
-		batchSigIdxs             = make([]int, 0, len(commit.Signatures))
-		talliedVotingPower int64 = 0
+		talliedVotingPower int64
+		seenVals           = make(map[int32]int, len(commit.Signatures))
+		batchSigIdxs       = make([]int, 0, len(commit.Signatures))
 	)
 	// attempt to create a batch verifier
 	bv, ok := batch.CreateBatchVerifier(vals.GetProposer().PubKey)
@@ -275,9 +275,9 @@ func verifyCommitSingle(
 	var (
 		val                *Validator
 		valIdx             int32
-		seenVals                 = make(map[int32]int, len(commit.Signatures))
-		talliedVotingPower int64 = 0
+		talliedVotingPower int64
 		voteSignBytes      []byte
+		seenVals           = make(map[int32]int, len(commit.Signatures))
 	)
 	for idx, commitSig := range commit.Signatures {
 		if ignoreSig(commitSig) {
diff --git a/types/validator.go b/types/validator.go
index fb3fa2d76..ded8156bf 100644
--- a/types/validator.go
+++ b/types/validator.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 
 	"github.com/tendermint/tendermint/crypto"
-	ce "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )
 
@@ -114,7 +114,7 @@ func ValidatorListString(vals []*Validator) string {
 // as its redundant with the pubkey. This also excludes ProposerPriority
 // which changes every round.
 func (v *Validator) Bytes() []byte {
-	pk, err := ce.PubKeyToProto(v.PubKey)
+	pk, err := encoding.PubKeyToProto(v.PubKey)
 	if err != nil {
 		panic(err)
 	}
@@ -137,7 +137,7 @@ func (v *Validator) ToProto() (*tmproto.Validator, error) {
 		return nil, errors.New("nil validator")
 	}
 
-	pk, err := ce.PubKeyToProto(v.PubKey)
+	pk, err := encoding.PubKeyToProto(v.PubKey)
 	if err != nil {
 		return nil, err
 	}
@@ -159,7 +159,7 @@ func ValidatorFromProto(vp *tmproto.Validator) (*Validator, error) {
 		return nil, errors.New("nil validator")
 	}
 
-	pk, err := ce.PubKeyFromProto(vp.PubKey)
+	pk, err := encoding.PubKeyFromProto(vp.PubKey)
 	if err != nil {
 		return nil, err
 	}
diff --git a/types/validator_set_test.go b/types/validator_set_test.go
index a69121344..87008bb1c 100644
--- a/types/validator_set_test.go
+++ b/types/validator_set_test.go
@@ -508,7 +508,7 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) {
 			{Address: []byte("c"), ProposerPriority: 1}}},
 			// this should average twice but the average should be 0 after the first iteration
 			// (voting power is 0 -> no changes)
-			11, 1 / 3},
+			11, 0},
 		2: {ValidatorSet{
 			Validators: []*Validator{
 				{Address: []byte("a"), ProposerPriority: 100},
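The expected value changing from 1 / 3 to 0 is cosmetic rather than behavioral: both operands are untyped integer constants, so Go evaluates 1 / 3 as constant integer division at compile time and the old expression was already exactly 0. The new literal simply states the expectation the comment above it already describes.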
diff --git a/types/vote_set.go b/types/vote_set.go
index b064f2c07..e014ae7bb 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -372,6 +372,9 @@ func (voteSet *VoteSet) GetByIndex(valIndex int32) *Vote {
 	}
 	voteSet.mtx.Lock()
 	defer voteSet.mtx.Unlock()
+	if int(valIndex) >= len(voteSet.votes) {
+		return nil
+	}
 	return voteSet.votes[valIndex]
 }
 
diff --git a/version/version.go b/version/version.go
index 3fb08652e..e42952f77 100644
--- a/version/version.go
+++ b/version/version.go
@@ -10,7 +10,7 @@ const (
 
 	// TMVersionDefault is the used as the fallback version of Tendermint Core
 	// when not using git describe. It is formatted with semantic versioning.
-	TMVersionDefault = "0.34.11"
+	TMVersionDefault = "0.35.0-unreleased"
 
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.17.0"
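The new bounds check in VoteSet.GetByIndex turns an out-of-range validator index into a nil result instead of an index-out-of-range panic, consistent with the nil-for-missing convention the method already uses. A behavior sketch under that assumption (voteSet is any populated *VoteSet):

    // Previously this could panic for a stale or corrupt index; now it
    // degrades to the same nil used for "no vote recorded".
    if vote := voteSet.GetByIndex(9999); vote == nil {
    	// treat as missing vote
    }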