diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 77b944d40..8be48245c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,6 +7,6 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @ebuchman @tendermint/tendermint-engineering +* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc -/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering +/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2473c5ded..c823bbaae 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -53,10 +53,10 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: weekly + interval: daily target-branch: "v0.37.x" - # Only allow automated security-related dependency updates until we cut the - # final v0.37.0 release. + # Only allow automated security-related dependency updates on release + # branches. open-pull-requests-limit: 0 labels: - T:dependencies @@ -65,9 +65,11 @@ updates: - package-ecosystem: gomod directory: "/" schedule: - interval: weekly + interval: daily target-branch: "v0.34.x" - open-pull-requests-limit: 10 + # Only allow automated security-related dependency updates on release + # branches. 
+ open-pull-requests-limit: 0 labels: - T:dependencies - S:automerge diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 6a2a5c934..e1f38d1a6 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.0.0 + uses: docker/setup-buildx-action@v2.2.1 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.0.0 + uses: docker/login-action@v2.1.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v3.1.1 + uses: docker/build-push-action@v3.2.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index a01e769d2..944ba13ef 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.22.0 + uses: slackapi/slack-github-action@v1.23.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK @@ -72,7 +72,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." 
} } ] diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml index c3f6b16aa..769a316b2 100644 --- a/.github/workflows/e2e-nightly-37x.yml +++ b/.github/workflows/e2e-nightly-37x.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.22.0 + uses: slackapi/slack-github-action@v1.23.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK @@ -72,7 +72,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." } } ] diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml index 2bb00dc47..dd8b7abbb 100644 --- a/.github/workflows/e2e-nightly-main.yml +++ b/.github/workflows/e2e-nightly-main.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.22.0 + uses: slackapi/slack-github-action@v1.23.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK @@ -61,7 +61,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." 
} } ] diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index b7ac5168c..0209501a1 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.22.0 + uses: slackapi/slack-github-action@v1.23.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/gosec.yml b/.github/workflows/gosec.yml deleted file mode 100644 index 016234b60..000000000 --- a/.github/workflows/gosec.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Run Gosec -on: - pull_request: - paths: - - '**/*.go' - - 'go.mod' - - 'go.sum' - push: - branches: - - main - - 'feature/*' - - 'v0.37.x' - - 'v0.34.x' - paths: - - '**/*.go' - - 'go.mod' - - 'go.sum' - -jobs: - Gosec: - permissions: - security-events: write - - runs-on: ubuntu-latest - env: - GO111MODULE: on - steps: - - name: Checkout Source - uses: actions/checkout@v3 - - - name: Run Gosec Security Scanner - uses: cosmos/gosec@master - with: - # Let the report trigger a failure with the Github Security scanner features. - args: "-no-fail -fmt sarif -out results.sarif ./..." 
- - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v2 - with: - # Path to SARIF file relative to the root of the repository - sarif_file: results.sarif diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index 28ae05b51..466ec37d3 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.10.1 + - uses: styfle/cancel-workflow-action@0.11.0 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index bc038daf9..81e7330ee 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -31,10 +31,7 @@ jobs: go.sum - uses: golangci/golangci-lint-action@v3 with: - # Required: the version of golangci-lint is required and - # must be specified without patch version: we always use the - # latest patch version. - version: v1.47.3 + version: v1.50.1 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/markdown-links.yml b/.github/workflows/markdown-links.yml index 6aef1baf8..d9de1359f 100644 --- a/.github/workflows/markdown-links.yml +++ b/.github/workflows/markdown-links.yml @@ -1,23 +1,20 @@ name: Check Markdown links on: - push: - branches: - - main - pull_request: - branches: [main] + schedule: + # 2am UTC daily + - cron: '0 2 * * *' jobs: markdown-link-check: + strategy: + matrix: + branch: ['main', 'v0.37.x', 'v0.34.x'] runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: technote-space/get-diff-action@v6 with: - PATTERNS: | - **/**.md - - uses: creachadair/github-action-markdown-link-check@master + ref: ${{ matrix.branch }} + - uses: informalsystems/github-action-markdown-link-check@main with: - check-modified-files-only: 'yes' config-file: '.md-link-check.json' - if: env.GIT_DIFF diff --git 
a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index e8c640d05..8101a6d34 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -8,7 +8,7 @@ on: - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10 jobs: - goreleaser: + prerelease: runs-on: ubuntu-latest steps: - name: Checkout @@ -38,3 +38,28 @@ jobs: args: release --rm-dist --release-notes=../release_notes.md env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + prerelease-success: + needs: prerelease + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack upon pre-release + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}" + with: + payload: | + { + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" + } + } + ] + } diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 474a71ff1..806b86ebb 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.8.0 + - uses: bufbuild/buf-setup-action@v1.9.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 414a92546..7515d19dc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,7 +6,7 @@ on: - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. 
v1.0, v20.15.10 jobs: - goreleaser: + release: runs-on: ubuntu-latest steps: - name: Checkout @@ -35,3 +35,28 @@ jobs: args: release --rm-dist --release-notes=../release_notes.md env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + release-success: + needs: release + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack upon release + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}" + with: + payload: | + { + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" + } + } + ] + } diff --git a/.gitignore b/.gitignore index 103263181..798f93301 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,5 @@ proto/spec/**/*.pb.go *.pdf *.gz *.dvi +# Python virtual environments +.venv diff --git a/.golangci.yml b/.golangci.yml index 7fa9f28aa..80e7214b2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,7 +2,6 @@ linters: enable: - asciicheck - bodyclose - - deadcode - depguard - dogsled - dupl @@ -26,7 +25,6 @@ linters: - typecheck - unconvert - unused - - varcheck issues: exclude-rules: diff --git a/.md-link-check.json b/.md-link-check.json index 6f47fa2c9..68f3c1482 100644 --- a/.md-link-check.json +++ b/.md-link-check.json @@ -2,5 +2,16 @@ "retryOn429": true, "retryCount": 5, "fallbackRetryDelay": "30s", - "aliveStatusCodes": [200, 206, 503] + "aliveStatusCodes": [200, 206, 503], + "httpHeaders": [ + { + "urls": [ + "https://docs.github.com/", + "https://help.github.com/" + ], + "headers": { + "Accept-Encoding": "zstd, br, gzip, deflate" + } + } + ] } diff --git a/CHANGELOG.md b/CHANGELOG.md index a3f4a881e..81abb1b51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,76 @@ Friendly reminder, we have a [bug bounty 
program](https://hackerone.com/cosmos). +## v0.34.23 + +*Nov 9, 2022* + +This release introduces some new Prometheus metrics to help in determining what +kinds of messages are consuming the most P2P bandwidth. This builds towards our +broader goal of optimizing Tendermint bandwidth consumption, and will give us +meaningful insights once we can establish these metrics for a number of chains. + +We now also return `Cache-Control` headers for select RPC endpoints to help +facilitate caching. + +Special thanks to external contributors on this release: @JayT106 + +### IMPROVEMENTS +- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new + Envelope type and associated methods for sending and receiving Envelopes + instead of raw bytes. This also adds new metrics, + `tendermint_p2p_message_send_bytes_total` and + `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of + each message type have been sent. +- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable + caching of RPC responses (@JayT106) + + The following RPC endpoints will return `Cache-Control` headers with a maximum + age of 1 day: + + - `/abci_info` + - `/block`, if `height` is supplied + - `/block_by_hash` + - `/block_results`, if `height` is supplied + - `/blockchain` + - `/check_tx` + - `/commit`, if `height` is supplied + - `/consensus_params`, if `height` is supplied + - `/genesis` + - `/genesis_chunked` + - `/tx` + - `/validators`, if `height` is supplied + +## v0.34.22 + +This release includes several bug fixes, [one of +which](https://github.com/tendermint/tendermint/pull/9518) we discovered while +building up a baseline for v0.34 against which to compare our upcoming v0.37 +release during our [QA process](./docs/qa/). 
+ +Special thanks to external contributors on this release: @RiccardoM + +### FEATURES + +- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support + HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters) + +### BUG FIXES + +- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483) + Calling `tendermint init` would incorrectly leave out the new `[storage]` + section delimiter in the generated configuration file - this has now been + fixed +- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent + peers who have errored being added to the peer set (@jmalicevic) +- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix + bug that caused the psql indexer to index empty blocks whenever one of the + transactions returned a non zero code. The relevant deduplication logic has + been moved within the kv indexer only (@cmwaters) +- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A + block sync stall was observed during our QA process whereby the node was + unable to make progress. Retrying block requests after a timeout fixes this. + ## v0.34.21 Release highlights include: diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index ea745d4e1..77940da8a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -11,6 +11,7 @@ - P2P Protocol - Go API + - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters) - Blockchain Protocol @@ -19,14 +20,20 @@ - Tooling - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. 
(@JayT106) + - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters) + labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing ### FEATURES +- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to + the addressbook upon start up (@cmwaters) + ### IMPROVEMENTS - [pubsub] \#7319 Performance improvements for the event query API (@creachadair) - [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm) - [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778) +- [rpc] \#9650 Enable caching of RPC responses (@JayT106) ### BUG FIXES @@ -96,4 +103,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22) - [docker] \#9073 enable cross platform build using docker buildx -- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout \ No newline at end of file +- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 21cf736ba..13ee02be6 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -12,7 +12,7 @@ and hence to Tendermint. * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, - race, ethnicity, age, religion, nationality, or other similar characteristic. + race, ethnicity, age, religion, nationality, or other similar characteristics. * On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb2c4b9d5..8db8efd40 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,7 +12,7 @@ landing changes in `main`. All work on the code base should be motivated by a [Github Issue](https://github.com/tendermint/tendermint/issues). [Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) -is a good place start when looking for places to contribute. If you +is a good place to start when looking for places to contribute. If you would like to work on an issue which already exists, please indicate so by leaving a comment. @@ -213,7 +213,7 @@ Changes with multiple classifications should be doubly included (eg. a bug fix that is also a breaking change should be recorded under both). Breaking changes are further subdivided according to the APIs/users they impact. -Any change that effects multiple APIs/users should be recorded multiply - for +Any change that affects multiple APIs/users should be recorded multiply - for instance, a change to the `Blockchain Protocol` that removes a field from the header should also be recorded under `CLI/RPC/Config` since the field will be removed from the header in RPC responses as well. @@ -247,7 +247,7 @@ To begin contributing, create a development branch either on `github.com/tenderm Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!) Update the `UPGRADING.md` if the change you've made is breaking and the -instructions should be in place for a user on how he/she can upgrade it's +instructions should be in place for a user on how he/she can upgrade its software (ABCI application, Tendermint-based blockchain, light client, wallet). 
Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release. diff --git a/Makefile b/Makefile index 7a4ef6f9e..97295c6ce 100644 --- a/Makefile +++ b/Makefile @@ -271,7 +271,7 @@ format: lint: @echo "--> Running linter" - @golangci-lint run + @go run github.com/golangci/golangci-lint/cmd/golangci-lint run .PHONY: lint DESTINATION = ./index.html.md diff --git a/README.md b/README.md index 732e53971..ad3cadc2c 100644 --- a/README.md +++ b/README.md @@ -113,10 +113,15 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md). ### Supported Versions -Because we are a small core team, we only ship patch updates, including security -updates, to the most recent minor release and the second-most recent minor -release. Consequently, we strongly recommend keeping Tendermint up-to-date. -Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md). +Because we are a small core team, we have limited capacity to ship patch +updates, including security updates. Consequently, we strongly recommend keeping +Tendermint up-to-date. Upgrading instructions can be found in +[UPGRADING.md](./UPGRADING.md). 
+ +Currently supported versions include: + +- v0.34.x +- v0.37.x (release candidate) ## Resources diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md index 98e81d723..0ed354f6b 100644 --- a/STYLE_GUIDE.md +++ b/STYLE_GUIDE.md @@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig * Make use of table driven testing where possible and not-cumbersome * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require) -* When using mocks, it is recommended to use Testify [mock] ( +* When using mocks, it is recommended to use Testify [mock]( ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration ## Errors diff --git a/UPGRADING.md b/UPGRADING.md index 001f1b7eb..27b267a28 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -5,6 +5,15 @@ Tendermint Core. ## Unreleased +## Config Changes +
+* A new config field, `BootstrapPeers` has been introduced as a means of + adding a list of addresses to the addressbook upon initializing a node. This is an + alternative to `PersistentPeers`. `PersistentPeers` should be only used for + nodes that you want to keep a constant connection with i.e. sentry nodes + +---- + ### ABCI Changes * The `ABCIVersion` is now `1.0.0`. diff --git a/abci/client/local_client.go b/abci/client/local_client.go index a428e2162..c3b291ed0 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -8,6 +8,10 @@ import ( tmsync "github.com/tendermint/tendermint/libs/sync" ) +// NOTE: use defer to unlock mutex because Application might panic (e.g., in +// case of malicious tx or query). It only makes sense for publicly exposed +// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query +// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct { service.BaseService diff --git a/abci/client/unsync_local_client.go b/abci/client/unsync_local_client.go new file mode 100644 index 000000000..782eb8f8f --- /dev/null +++ b/abci/client/unsync_local_client.go @@ -0,0 +1,43 @@ +package abcicli + +import ( + "context" + + types "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/service" +) + +type unsyncLocalClient struct { + service.BaseService + + types.Application +} + +var _ Client = (*unsyncLocalClient)(nil) + +// NewUnsyncLocalClient creates an unsynchronized local client, which will be +// directly calling the methods of the given app. +// +// Unlike NewLocalClient, it does not hold a mutex around the application, so +// it is up to the application to manage its synchronization properly. +func NewUnsyncLocalClient(app types.Application) Client { + cli := &unsyncLocalClient{ + Application: app, + } + cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli) + return cli +} + +// TODO: change types.Application to include Error()? +func (app *unsyncLocalClient) Error() error { + return nil +} + +func (app *unsyncLocalClient) Flush(_ context.Context) error { + return nil +} + +func (app *unsyncLocalClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + return &types.ResponseEcho{Message: msg}, nil +} + diff --git a/blocksync/metrics.gen.go b/blocksync/metrics.gen.go new file mode 100644 index 000000000..1d093fb31 --- /dev/null +++ b/blocksync/metrics.gen.go @@ -0,0 +1,30 @@ +// Code generated by metricsgen. DO NOT EDIT. 
+ +package blocksync + +import ( + "github.com/go-kit/kit/metrics/discard" + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "syncing", + Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + Syncing: discard.NewGauge(), + } +} diff --git a/blocksync/metrics.go b/blocksync/metrics.go new file mode 100644 index 000000000..78a6337b9 --- /dev/null +++ b/blocksync/metrics.go @@ -0,0 +1,19 @@ +package blocksync + +import ( + "github.com/go-kit/kit/metrics" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "blocksync" +) + +//go:generate go run ../scripts/metricsgen -struct=Metrics + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // Whether or not a node is block syncing. 1 if yes, 0 if no. 
+ Syncing metrics.Gauge +} diff --git a/blocksync/msgs.go b/blocksync/msgs.go index e3d6e551c..142c38716 100644 --- a/blocksync/msgs.go +++ b/blocksync/msgs.go @@ -19,58 +19,6 @@ const ( BlockResponseMessageFieldKeySize ) -// EncodeMsg encodes a Protobuf message -func EncodeMsg(pb proto.Message) ([]byte, error) { - msg := bcproto.Message{} - - switch pb := pb.(type) { - case *bcproto.BlockRequest: - msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb} - case *bcproto.BlockResponse: - msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb} - case *bcproto.NoBlockResponse: - msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb} - case *bcproto.StatusRequest: - msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb} - case *bcproto.StatusResponse: - msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb} - default: - return nil, fmt.Errorf("unknown message type %T", pb) - } - - bz, err := proto.Marshal(&msg) - if err != nil { - return nil, fmt.Errorf("unable to marshal %T: %w", pb, err) - } - - return bz, nil -} - -// DecodeMsg decodes a Protobuf message. -func DecodeMsg(bz []byte) (proto.Message, error) { - pb := &bcproto.Message{} - - err := proto.Unmarshal(bz, pb) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *bcproto.Message_BlockRequest: - return msg.BlockRequest, nil - case *bcproto.Message_BlockResponse: - return msg.BlockResponse, nil - case *bcproto.Message_NoBlockResponse: - return msg.NoBlockResponse, nil - case *bcproto.Message_StatusRequest: - return msg.StatusRequest, nil - case *bcproto.Message_StatusResponse: - return msg.StatusResponse, nil - default: - return nil, fmt.Errorf("unknown message type %T", msg) - } -} - // ValidateMsg validates a message. 
func ValidateMsg(pb proto.Message) error { if pb == nil { diff --git a/blocksync/reactor.go b/blocksync/reactor.go index 09dd2ef90..eeada7da2 100644 --- a/blocksync/reactor.go +++ b/blocksync/reactor.go @@ -58,11 +58,13 @@ type Reactor struct { requestsCh <-chan BlockRequest errorsCh <-chan peerError + + metrics *Metrics } // NewReactor returns new reactor instance. func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, - blockSync bool) *Reactor { + blockSync bool, metrics *Metrics) *Reactor { if state.LastBlockHeight != store.Height() { panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, @@ -88,6 +90,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS blockSync: blockSync, requestsCh: requestsCh, errorsCh: errorsCh, + metrics: metrics, } bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR) return bcR @@ -143,21 +146,20 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 1000, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: MaxMsgSize, + MessageType: &bcproto.Message{}, }, } } // AddPeer implements Reactor by sending our state to peer. func (bcR *Reactor) AddPeer(peer p2p.Peer) { - msgBytes, err := EncodeMsg(&bcproto.StatusResponse{ - Base: bcR.store.Base(), - Height: bcR.store.Height()}) - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return - } - - peer.Send(BlocksyncChannel, msgBytes) + peer.Send(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusResponse{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }, + }) // it's OK if send fails. 
will try later in poolRoutine // peer is added to the pool once we receive the first @@ -182,69 +184,53 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, return false } - msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl}) - if err != nil { - bcR.Logger.Error("could not marshal msg", "err", err) - return false - } - - return src.TrySend(BlocksyncChannel, msgBytes) + return src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.BlockResponse{Block: bl}, + }) } bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) - - msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height}) - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return false - } - - return src.TrySend(BlocksyncChannel, msgBytes) + return src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, + }) } // Receive implements Reactor by handling 4 types of messages (look below). 
-func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := DecodeMsg(msgBytes) - if err != nil { - bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - bcR.Switch.StopPeerForError(src, err) +func (bcR *Reactor) Receive(e p2p.Envelope) { + if err := ValidateMsg(e.Message); err != nil { + bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) return } - if err = ValidateMsg(msg); err != nil { - bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - bcR.Switch.StopPeerForError(src, err) - return - } + bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message) - bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) - - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *bcproto.BlockRequest: - bcR.respondToPeer(msg, src) + bcR.respondToPeer(msg, e.Src) case *bcproto.BlockResponse: bi, err := types.BlockFromProto(msg.Block) if err != nil { bcR.Logger.Error("Block content is invalid", "err", err) return } - bcR.pool.AddBlock(src.ID(), bi, len(msgBytes)) + bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size()) case *bcproto.StatusRequest: // Send peer our state. - msgBytes, err := EncodeMsg(&bcproto.StatusResponse{ - Height: bcR.store.Height(), - Base: bcR.store.Base(), + e.Src.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusResponse{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }, }) - if err != nil { - bcR.Logger.Error("could not convert msg to protobut", "err", err) - return - } - src.TrySend(BlocksyncChannel, msgBytes) case *bcproto.StatusResponse: // Got a peer status. Unverified. 
- bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) + bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) case *bcproto.NoBlockResponse: - bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) + bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -253,6 +239,8 @@ func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! func (bcR *Reactor) poolRoutine(stateSynced bool) { + bcR.metrics.Syncing.Set(1) + defer bcR.metrics.Syncing.Set(0) trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) defer trySyncTicker.Stop() @@ -285,13 +273,10 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) { if peer == nil { continue } - msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height}) - if err != nil { - bcR.Logger.Error("could not convert msg to proto", "err", err) - continue - } - - queued := peer.TrySend(BlocksyncChannel, msgBytes) + queued := peer.TrySend(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.BlockRequest{Height: request.Height}, + }) if !queued { bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height) } @@ -303,7 +288,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) { case <-statusUpdateTicker.C: // ask for status updates - go bcR.BroadcastStatusRequest() //nolint: errcheck + go bcR.BroadcastStatusRequest() } } @@ -429,14 +414,9 @@ FOR_LOOP: } // BroadcastStatusRequest broadcasts `BlockStore` base and height. 
-func (bcR *Reactor) BroadcastStatusRequest() error { - bm, err := EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - bcR.Logger.Error("could not convert msg to proto", "err", err) - return fmt.Errorf("could not convert msg to proto: %w", err) - } - - bcR.Switch.Broadcast(BlocksyncChannel, bm) - - return nil +func (bcR *Reactor) BroadcastStatusRequest() { + bcR.Switch.Broadcast(p2p.Envelope{ + ChannelID: BlocksyncChannel, + Message: &bcproto.StatusRequest{}, + }) } diff --git a/blocksync/reactor_test.go b/blocksync/reactor_test.go index ae04f478f..c845b066c 100644 --- a/blocksync/reactor_test.go +++ b/blocksync/reactor_test.go @@ -145,7 +145,7 @@ func newReactor( blockStore.SaveBlock(thisBlock, thisParts, lastCommit) } - bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync) + bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics()) bcReactor.SetLogger(logger.With("module", "blocksync")) return ReactorPair{bcReactor, proxyApp} diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 089817f2f..f29fd5a81 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -67,7 +67,8 @@ func copyConfig(home, dir string) error { func dumpProfile(dir, addr, profile string, debug int) error { endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug) - resp, err := http.Get(endpoint) //nolint: gosec + //nolint:gosec,nolintlint + resp, err := http.Get(endpoint) if err != nil { return fmt.Errorf("failed to query for %s profile: %w", profile, err) } diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index 7683759ff..8a60e96ac 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/store" ) -var removeBlock bool = false +var removeBlock = false func init() { RollbackStateCmd.Flags().BoolVar(&removeBlock, 
"hard", false, "remove last block as well as state") diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index ac2984111..5bb2a6d45 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -66,6 +66,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") + cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup") cmd.Flags().String("p2p.unconditional_peer_ids", config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") diff --git a/config/config.go b/config/config.go index d76212f6c..cc2491f84 100644 --- a/config/config.go +++ b/config/config.go @@ -423,6 +423,7 @@ type RPCConfig struct { TLSKeyFile string `mapstructure:"tls_key_file"` // pprof listen address (https://golang.org/pkg/net/http/pprof) + // FIXME: This should be moved under the instrumentation section PprofListenAddress string `mapstructure:"pprof_laddr"` } @@ -506,6 +507,10 @@ func (cfg *RPCConfig) IsCorsEnabled() bool { return len(cfg.CORSAllowedOrigins) != 0 } +func (cfg *RPCConfig) IsPprofEnabled() bool { + return len(cfg.PprofListenAddress) != 0 +} + func (cfg RPCConfig) KeyFile() string { path := cfg.TLSKeyFile if filepath.IsAbs(path) { @@ -543,6 +548,11 @@ type P2PConfig struct { //nolint: maligned // We only use these if we can’t connect to peers in the addrbook Seeds string `mapstructure:"seeds"` + // Comma separated list of peers to be added to the peer store + // on startup. 
Either BootstrapPeers or PersistentPeers are + // needed for peer discovery + BootstrapPeers string `mapstructure:"bootstrap_peers"` + // Comma separated list of nodes to keep persistent connections to PersistentPeers string `mapstructure:"persistent_peers"` @@ -703,14 +713,28 @@ type MempoolConfig struct { // Mempool version to use: // 1) "v0" - (default) FIFO mempool. // 2) "v1" - prioritized mempool. - // WARNING: There's a known memory leak with the prioritized mempool - // that the team are working on. Read more here: - // https://github.com/tendermint/tendermint/issues/8775 - Version string `mapstructure:"version"` - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` + Version string `mapstructure:"version"` + // RootDir is the root directory for all data. This should be configured via + // the $TMHOME env variable or --home cmd flag rather than overriding this + // struct field. + RootDir string `mapstructure:"home"` + // Recheck (default: true) defines whether Tendermint should recheck the + // validity for all remaining transaction in the mempool after a block. + // Since a block affects the application state, some transactions in the + // mempool may become invalid. If this does not apply to your application, + // you can disable rechecking. + Recheck bool `mapstructure:"recheck"` + // Broadcast (default: true) defines whether the mempool should relay + // transactions to other peers. Setting this to false will stop the mempool + // from relaying transactions to other peers until they are included in a + // block. In other words, if Broadcast is disabled, only the peer you send + // the tx to will see it until it is included in a block. + Broadcast bool `mapstructure:"broadcast"` + // WalPath (default: "") configures the location of the Write Ahead Log + // (WAL) for the mempool. The WAL is disabled by default. 
To enable, set + // WalPath to where you want the WAL to be written (e.g. + // "data/mempool.wal"). + WalPath string `mapstructure:"wal_dir"` // Maximum number of transactions in the mempool Size int `mapstructure:"size"` // Limit the total size of all txs in the mempool. @@ -1204,6 +1228,10 @@ func (cfg *InstrumentationConfig) ValidateBasic() error { return nil } +func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool { + return cfg.Prometheus && cfg.PrometheusListenAddr != "" +} + //----------------------------------------------------------------------------- // Utils diff --git a/config/toml.go b/config/toml.go index c2de55642..a5550ff63 100644 --- a/config/toml.go +++ b/config/toml.go @@ -283,6 +283,11 @@ external_address = "{{ .P2P.ExternalAddress }}" # Comma separated list of seed nodes to connect to seeds = "{{ .P2P.Seeds }}" +# Comma separated list of peers to be added to the peer store +# on startup. Either BootstrapPeers or PersistentPeers are +# needed for peer discovery +bootstrap_peers = "{{ .P2P.BootstrapPeers }}" + # Comma separated list of nodes to keep persistent connections to persistent_peers = "{{ .P2P.PersistentPeers }}" @@ -349,8 +354,24 @@ dial_timeout = "{{ .P2P.DialTimeout }}" # 2) "v1" - prioritized mempool. version = "{{ .Mempool.Version }}" +# Recheck (default: true) defines whether Tendermint should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. recheck = {{ .Mempool.Recheck }} + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. 
In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. broadcast = {{ .Mempool.Broadcast }} + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). wal_dir = "{{ js .Mempool.WalPath }}" # Maximum number of transactions in the mempool @@ -436,7 +457,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}" [blocksync] # Block Sync version to use: -# +# # In v0.37, v1 and v2 of the block sync protocols were deprecated. # Please use v0 instead. # diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 84a2afad1..d86ab3cba 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -27,6 +27,7 @@ import ( mempoolv0 "github.com/tendermint/tendermint/mempool/v0" mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -167,10 +168,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i, peer := range peerList { if i < len(peerList)/2 { bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1})) + peer.Send(p2p.Envelope{ + Message: &tmcons.Vote{Vote: prevote1.ToProto()}, + ChannelID: VoteChannel, + }) } else { bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2})) + peer.Send(p2p.Envelope{ + Message: &tmcons.Vote{Vote: prevote2.ToProto()}, + ChannelID: VoteChannel, + }) } } } else { @@ -519,18 +526,26 @@ func sendProposalAndParts( parts *types.PartSet, ) { // proposal - msg := 
&ProposalMessage{Proposal: proposal} - peer.Send(DataChannel, MustEncode(msg)) + peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.Proposal{Proposal: *proposal.ToProto()}, + }) // parts for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) - msg := &BlockPartMessage{ - Height: height, // This tells peer that this part applies to us. - Round: round, // This tells peer that this part applies to us. - Part: part, + pp, err := part.ToProto() + if err != nil { + panic(err) // TODO: wbanfield better error handling } - peer.Send(DataChannel, MustEncode(msg)) + peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: height, // This tells peer that this part applies to us. + Round: round, // This tells peer that this part applies to us. + Part: *pp, + }, + }) } // votes @@ -538,9 +553,14 @@ func sendProposalAndParts( prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) cs.mtx.Unlock() - - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) - peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) + peer.Send(p2p.Envelope{ + ChannelID: VoteChannel, + Message: &tmcons.Vote{Vote: prevote.ToProto()}, + }) + peer.Send(p2p.Envelope{ + ChannelID: VoteChannel, + Message: &tmcons.Vote{Vote: precommit.ToProto()}, + }) } //---------------------------------------- @@ -578,7 +598,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { br.reactor.RemovePeer(peer, reason) } -func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { - br.reactor.Receive(chID, peer, msgBytes) +func (br *ByzantineReactor) Receive(e p2p.Envelope) { + br.reactor.Receive(e) } func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index 
fa70ff468..54bc453fb 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -94,7 +95,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw peers := sw.Peers().List() for _, peer := range peers { cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) + peer.Send(p2p.Envelope{ + Message: &tmcons.Vote{Vote: precommit.ToProto()}, + ChannelID: VoteChannel, + }) } }() } diff --git a/consensus/metrics.gen.go b/consensus/metrics.gen.go index 6f1699cdd..94ea5d224 100644 --- a/consensus/metrics.gen.go +++ b/consensus/metrics.gen.go @@ -118,18 +118,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "latest_block_height", Help: "The latest block height.", }, labels).With(labelsAndValues...), - BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "block_syncing", - Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), - StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "state_syncing", - Help: "Whether or not a node is state syncing. 
1 if yes, 0 if no.", - }, labels).With(labelsAndValues...), BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -208,8 +196,6 @@ func NopMetrics() *Metrics { BlockSizeBytes: discard.NewGauge(), TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), - BlockSyncing: discard.NewGauge(), - StateSyncing: discard.NewGauge(), BlockParts: discard.NewCounter(), StepDurationSeconds: discard.NewHistogram(), BlockGossipPartsReceived: discard.NewCounter(), diff --git a/consensus/metrics.go b/consensus/metrics.go index e6a8f284a..f8262d391 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -61,10 +61,6 @@ type Metrics struct { TotalTxs metrics.Gauge // The latest block height. CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"` - // Whether or not a node is block syncing. 1 if yes, 0 if no. - BlockSyncing metrics.Gauge - // Whether or not a node is state syncing. 1 if yes, 0 if no. - StateSyncing metrics.Gauge // Number of block parts transmitted by each peer. BlockParts metrics.Counter `metrics_labels:"peer_id"` diff --git a/consensus/msgs.go b/consensus/msgs.go index 5d22905cd..6eb339aae 100644 --- a/consensus/msgs.go +++ b/consensus/msgs.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/cosmos/gogoproto/proto" - cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmmath "github.com/tendermint/tendermint/libs/math" @@ -15,173 +14,147 @@ import ( "github.com/tendermint/tendermint/types" ) -// MsgToProto takes a consensus message type and returns the proto defined consensus message -func MsgToProto(msg Message) (*tmcons.Message, error) { +// MsgToProto takes a consensus message type and returns the proto defined consensus message. +// +// TODO: This needs to be removed, but WALToProto depends on this. 
+func MsgToProto(msg Message) (proto.Message, error) { if msg == nil { return nil, errors.New("consensus: message is nil") } - var pb tmcons.Message + var pb proto.Message switch msg := msg.(type) { case *NewRoundStepMessage: - pb = tmcons.Message{ - Sum: &tmcons.Message_NewRoundStep{ - NewRoundStep: &tmcons.NewRoundStep{ - Height: msg.Height, - Round: msg.Round, - Step: uint32(msg.Step), - SecondsSinceStartTime: msg.SecondsSinceStartTime, - LastCommitRound: msg.LastCommitRound, - }, - }, + pb = &tmcons.NewRoundStep{ + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, } + case *NewValidBlockMessage: pbPartSetHeader := msg.BlockPartSetHeader.ToProto() pbBits := msg.BlockParts.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_NewValidBlock{ - NewValidBlock: &tmcons.NewValidBlock{ - Height: msg.Height, - Round: msg.Round, - BlockPartSetHeader: pbPartSetHeader, - BlockParts: pbBits, - IsCommit: msg.IsCommit, - }, - }, + pb = &tmcons.NewValidBlock{ + Height: msg.Height, + Round: msg.Round, + BlockPartSetHeader: pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.IsCommit, } + case *ProposalMessage: pbP := msg.Proposal.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_Proposal{ - Proposal: &tmcons.Proposal{ - Proposal: *pbP, - }, - }, + pb = &tmcons.Proposal{ + Proposal: *pbP, } + case *ProposalPOLMessage: pbBits := msg.ProposalPOL.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_ProposalPol{ - ProposalPol: &tmcons.ProposalPOL{ - Height: msg.Height, - ProposalPolRound: msg.ProposalPOLRound, - ProposalPol: *pbBits, - }, - }, + pb = &tmcons.ProposalPOL{ + Height: msg.Height, + ProposalPolRound: msg.ProposalPOLRound, + ProposalPol: *pbBits, } + case *BlockPartMessage: parts, err := msg.Part.ToProto() if err != nil { return nil, fmt.Errorf("msg to proto error: %w", err) } - pb = tmcons.Message{ - Sum: &tmcons.Message_BlockPart{ - BlockPart: 
&tmcons.BlockPart{ - Height: msg.Height, - Round: msg.Round, - Part: *parts, - }, - }, + pb = &tmcons.BlockPart{ + Height: msg.Height, + Round: msg.Round, + Part: *parts, } + case *VoteMessage: vote := msg.Vote.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_Vote{ - Vote: &tmcons.Vote{ - Vote: vote, - }, - }, + pb = &tmcons.Vote{ + Vote: vote, } + case *HasVoteMessage: - pb = tmcons.Message{ - Sum: &tmcons.Message_HasVote{ - HasVote: &tmcons.HasVote{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - Index: msg.Index, - }, - }, + pb = &tmcons.HasVote{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, } + case *VoteSetMaj23Message: bi := msg.BlockID.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_VoteSetMaj23{ - VoteSetMaj23: &tmcons.VoteSetMaj23{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - BlockID: bi, - }, - }, + pb = &tmcons.VoteSetMaj23{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, } + case *VoteSetBitsMessage: bi := msg.BlockID.ToProto() bits := msg.Votes.ToProto() - vsb := &tmcons.Message_VoteSetBits{ - VoteSetBits: &tmcons.VoteSetBits{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - BlockID: bi, - }, + vsb := &tmcons.VoteSetBits{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, } if bits != nil { - vsb.VoteSetBits.Votes = *bits + vsb.Votes = *bits } - pb = tmcons.Message{ - Sum: vsb, - } + pb = vsb default: return nil, fmt.Errorf("consensus: message not recognized: %T", msg) } - return &pb, nil + return pb, nil } // MsgFromProto takes a consensus proto message and returns the native go type -func MsgFromProto(msg *tmcons.Message) (Message, error) { - if msg == nil { +func MsgFromProto(p proto.Message) (Message, error) { + if p == nil { return nil, errors.New("consensus: nil message") } var pb Message - switch msg := msg.Sum.(type) { - case *tmcons.Message_NewRoundStep: - rs, err := 
tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step)) + switch msg := p.(type) { + case *tmcons.NewRoundStep: + rs, err := tmmath.SafeConvertUint8(int64(msg.Step)) // deny message based on possible overflow if err != nil { return nil, fmt.Errorf("denying message due to possible overflow: %w", err) } pb = &NewRoundStepMessage{ - Height: msg.NewRoundStep.Height, - Round: msg.NewRoundStep.Round, + Height: msg.Height, + Round: msg.Round, Step: cstypes.RoundStepType(rs), - SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime, - LastCommitRound: msg.NewRoundStep.LastCommitRound, + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, } - case *tmcons.Message_NewValidBlock: - pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + case *tmcons.NewValidBlock: + pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader) if err != nil { return nil, fmt.Errorf("parts to proto error: %w", err) } pbBits := new(bits.BitArray) - pbBits.FromProto(msg.NewValidBlock.BlockParts) + pbBits.FromProto(msg.BlockParts) pb = &NewValidBlockMessage{ - Height: msg.NewValidBlock.Height, - Round: msg.NewValidBlock.Round, + Height: msg.Height, + Round: msg.Round, BlockPartSetHeader: *pbPartSetHeader, BlockParts: pbBits, - IsCommit: msg.NewValidBlock.IsCommit, + IsCommit: msg.IsCommit, } - case *tmcons.Message_Proposal: - pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + case *tmcons.Proposal: + pbP, err := types.ProposalFromProto(&msg.Proposal) if err != nil { return nil, fmt.Errorf("proposal msg to proto error: %w", err) } @@ -189,26 +162,26 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { pb = &ProposalMessage{ Proposal: pbP, } - case *tmcons.Message_ProposalPol: + case *tmcons.ProposalPOL: pbBits := new(bits.BitArray) - pbBits.FromProto(&msg.ProposalPol.ProposalPol) + pbBits.FromProto(&msg.ProposalPol) pb = &ProposalPOLMessage{ - Height: msg.ProposalPol.Height, - 
ProposalPOLRound: msg.ProposalPol.ProposalPolRound, + Height: msg.Height, + ProposalPOLRound: msg.ProposalPolRound, ProposalPOL: pbBits, } - case *tmcons.Message_BlockPart: - parts, err := types.PartFromProto(&msg.BlockPart.Part) + case *tmcons.BlockPart: + parts, err := types.PartFromProto(&msg.Part) if err != nil { return nil, fmt.Errorf("blockpart msg to proto error: %w", err) } pb = &BlockPartMessage{ - Height: msg.BlockPart.Height, - Round: msg.BlockPart.Round, + Height: msg.Height, + Round: msg.Round, Part: parts, } - case *tmcons.Message_Vote: - vote, err := types.VoteFromProto(msg.Vote.Vote) + case *tmcons.Vote: + vote, err := types.VoteFromProto(msg.Vote) if err != nil { return nil, fmt.Errorf("vote msg to proto error: %w", err) } @@ -216,36 +189,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { pb = &VoteMessage{ Vote: vote, } - case *tmcons.Message_HasVote: + case *tmcons.HasVote: pb = &HasVoteMessage{ - Height: msg.HasVote.Height, - Round: msg.HasVote.Round, - Type: msg.HasVote.Type, - Index: msg.HasVote.Index, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, } - case *tmcons.Message_VoteSetMaj23: - bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + case *tmcons.VoteSetMaj23: + bi, err := types.BlockIDFromProto(&msg.BlockID) if err != nil { return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err) } pb = &VoteSetMaj23Message{ - Height: msg.VoteSetMaj23.Height, - Round: msg.VoteSetMaj23.Round, - Type: msg.VoteSetMaj23.Type, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, BlockID: *bi, } - case *tmcons.Message_VoteSetBits: - bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + case *tmcons.VoteSetBits: + bi, err := types.BlockIDFromProto(&msg.BlockID) if err != nil { return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err) } bits := new(bits.BitArray) - bits.FromProto(&msg.VoteSetBits.Votes) + bits.FromProto(&msg.Votes) pb = &VoteSetBitsMessage{ - Height: 
msg.VoteSetBits.Height, - Round: msg.VoteSetBits.Round, - Type: msg.VoteSetBits.Type, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, BlockID: *bi, Votes: bits, } @@ -260,20 +233,6 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { return pb, nil } -// MustEncode takes the reactors msg, makes it proto and marshals it -// this mimics `MustMarshalBinaryBare` in that is panics on error -func MustEncode(msg Message) []byte { - pb, err := MsgToProto(msg) - if err != nil { - panic(err) - } - enc, err := proto.Marshal(pb) - if err != nil { - panic(err) - } - return enc -} - // WALToProto takes a WAL message and return a proto walMessage and error func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) { var pb tmcons.WALMessage @@ -294,10 +253,14 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) { if err != nil { return nil, err } + if w, ok := consMsg.(p2p.Wrapper); ok { + consMsg = w.Wrap() + } + cm := consMsg.(*tmcons.Message) pb = tmcons.WALMessage{ Sum: &tmcons.WALMessage_MsgInfo{ MsgInfo: &tmcons.MsgInfo{ - Msg: *consMsg, + Msg: *cm, PeerID: string(msg.PeerID), }, }, @@ -343,7 +306,11 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) { Step: msg.EventDataRoundState.Step, } case *tmcons.WALMessage_MsgInfo: - walMsg, err := MsgFromProto(&msg.MsgInfo.Msg) + um, err := msg.MsgInfo.Msg.Unwrap() + if err != nil { + return nil, fmt.Errorf("unwrap message: %w", err) + } + walMsg, err := MsgFromProto(um) if err != nil { return nil, fmt.Errorf("msgInfo from proto error: %w", err) } diff --git a/consensus/msgs_test.go b/consensus/msgs_test.go index 7690c3364..122a2a411 100644 --- a/consensus/msgs_test.go +++ b/consensus/msgs_test.go @@ -71,7 +71,7 @@ func TestMsgToProto(t *testing.T) { testsCases := []struct { testName string msg Message - want *tmcons.Message + want proto.Message wantErr bool }{ {"successful NewRoundStepMessage", &NewRoundStepMessage{ @@ -80,17 +80,15 @@ func TestMsgToProto(t *testing.T) { Step: 1, 
SecondsSinceStartTime: 1, LastCommitRound: 2, - }, &tmcons.Message{ - Sum: &tmcons.Message_NewRoundStep{ - NewRoundStep: &tmcons.NewRoundStep{ - Height: 2, - Round: 1, - Step: 1, - SecondsSinceStartTime: 1, - LastCommitRound: 2, - }, - }, - }, false}, + }, &tmcons.NewRoundStep{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }, + + false}, {"successful NewValidBlockMessage", &NewValidBlockMessage{ Height: 1, @@ -98,92 +96,78 @@ func TestMsgToProto(t *testing.T) { BlockPartSetHeader: psh, BlockParts: bits, IsCommit: false, - }, &tmcons.Message{ - Sum: &tmcons.Message_NewValidBlock{ - NewValidBlock: &tmcons.NewValidBlock{ - Height: 1, - Round: 1, - BlockPartSetHeader: pbPsh, - BlockParts: pbBits, - IsCommit: false, - }, - }, - }, false}, + }, &tmcons.NewValidBlock{ + Height: 1, + Round: 1, + BlockPartSetHeader: pbPsh, + BlockParts: pbBits, + IsCommit: false, + }, + + false}, {"successful BlockPartMessage", &BlockPartMessage{ Height: 100, Round: 1, Part: &parts, - }, &tmcons.Message{ - Sum: &tmcons.Message_BlockPart{ - BlockPart: &tmcons.BlockPart{ - Height: 100, - Round: 1, - Part: *pbParts, - }, - }, - }, false}, + }, &tmcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, + + false}, {"successful ProposalPOLMessage", &ProposalPOLMessage{ Height: 1, ProposalPOLRound: 1, ProposalPOL: bits, - }, &tmcons.Message{ - Sum: &tmcons.Message_ProposalPol{ - ProposalPol: &tmcons.ProposalPOL{ - Height: 1, - ProposalPolRound: 1, - ProposalPol: *pbBits, - }, - }}, false}, + }, &tmcons.ProposalPOL{ + Height: 1, + ProposalPolRound: 1, + ProposalPol: *pbBits, + }, + false}, {"successful ProposalMessage", &ProposalMessage{ Proposal: &proposal, - }, &tmcons.Message{ - Sum: &tmcons.Message_Proposal{ - Proposal: &tmcons.Proposal{ - Proposal: *pbProposal, - }, - }, - }, false}, + }, &tmcons.Proposal{ + Proposal: *pbProposal, + }, + + false}, {"successful VoteMessage", &VoteMessage{ Vote: vote, - }, &tmcons.Message{ - Sum: 
&tmcons.Message_Vote{ - Vote: &tmcons.Vote{ - Vote: pbVote, - }, - }, - }, false}, + }, &tmcons.Vote{ + Vote: pbVote, + }, + + false}, {"successful VoteSetMaj23", &VoteSetMaj23Message{ Height: 1, Round: 1, Type: 1, BlockID: bi, - }, &tmcons.Message{ - Sum: &tmcons.Message_VoteSetMaj23{ - VoteSetMaj23: &tmcons.VoteSetMaj23{ - Height: 1, - Round: 1, - Type: 1, - BlockID: pbBi, - }, - }, - }, false}, + }, &tmcons.VoteSetMaj23{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + }, + + false}, {"successful VoteSetBits", &VoteSetBitsMessage{ Height: 1, Round: 1, Type: 1, BlockID: bi, Votes: bits, - }, &tmcons.Message{ - Sum: &tmcons.Message_VoteSetBits{ - VoteSetBits: &tmcons.VoteSetBits{ - Height: 1, - Round: 1, - Type: 1, - BlockID: pbBi, - Votes: *pbBits, - }, - }, - }, false}, + }, &tmcons.VoteSetBits{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + Votes: *pbBits, + }, + + false}, {"failure", nil, &tmcons.Message{}, true}, } for _, tt := range testsCases { diff --git a/consensus/reactor.go b/consensus/reactor.go index b0d3e3675..a8d672e44 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -7,8 +7,6 @@ import ( "sync" "time" - "github.com/cosmos/gogoproto/proto" - cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" @@ -121,8 +119,6 @@ func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { conR.mtx.Lock() conR.waitSync = false conR.mtx.Unlock() - conR.Metrics.BlockSyncing.Set(0) - conR.Metrics.StateSyncing.Set(0) if skipWAL { conR.conS.doWALCatchup = false @@ -148,6 +144,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 6, SendQueueCapacity: 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: DataChannel, // maybe split between gossiping current block and catchup stuff @@ -156,6 +153,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 
100, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteChannel, @@ -163,6 +161,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 100, RecvBufferCapacity: 100 * 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteSetBitsChannel, @@ -170,6 +169,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 2, RecvBufferCapacity: 1024, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, } } @@ -223,34 +223,33 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Peer state updates can happen in parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine // NOTE: blocks on consensus state for proposals, block parts, and votes -func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { +func (conR *Reactor) Receive(e p2p.Envelope) { if !conR.IsRunning() { - conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID) return } - - msg, err := decodeMsg(msgBytes) + msg, err := MsgFromProto(e.Message) if err != nil { - conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } if err = msg.ValidateBasic(); err != nil { - conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } - conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg) // Get peer states - 
ps, ok := src.Get(types.PeerStateKey).(*PeerState) + ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState) if !ok { - panic(fmt.Sprintf("Peer %v has no state", src)) + panic(fmt.Sprintf("Peer %v has no state", e.Src)) } - switch chID { + switch e.ChannelID { case StateChannel: switch msg := msg.(type) { case *NewRoundStepMessage: @@ -258,8 +257,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { initialHeight := conR.conS.state.InitialHeight conR.conS.mtx.Unlock() if err = msg.ValidateHeight(initialHeight); err != nil { - conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } ps.ApplyNewRoundStepMessage(msg) @@ -278,7 +277,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // Peer claims to have a maj23 for some BlockID at H,R,S, err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) if err != nil { - conR.Switch.StopPeerForError(src, err) + conR.Switch.StopPeerForError(e.Src, err) return } // Respond with a VoteSetBitsMessage showing which votes we have. @@ -292,13 +291,19 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { default: panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") } - src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{ + eMsg := &tmcons.VoteSetBits{ Height: msg.Height, Round: msg.Round, Type: msg.Type, - BlockID: msg.BlockID, - Votes: ourVotes, - })) + BlockID: msg.BlockID.ToProto(), + } + if votes := ourVotes.ToProto(); votes != nil { + eMsg.Votes = *votes + } + e.Src.TrySend(p2p.Envelope{ + ChannelID: VoteSetBitsChannel, + Message: eMsg, + }) default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -311,13 +316,13 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { switch msg := msg.(type) { case *ProposalMessage: ps.SetHasProposal(msg.Proposal) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) - conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -337,7 +342,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(msg.Vote) - cs.peerMsgQueue <- msgInfo{msg, src.ID()} + cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: // don't punish (leave room for soft upgrades) @@ -376,7 +381,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } default: - conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID)) } } @@ -430,29 +435,39 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() { func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { nrsMsg 
:= makeRoundStepMessage(rs) - conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg)) + conR.Switch.Broadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: nrsMsg, + }) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { - csMsg := &NewValidBlockMessage{ + psh := rs.ProposalBlockParts.Header() + csMsg := &tmcons.NewValidBlock{ Height: rs.Height, Round: rs.Round, - BlockPartSetHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), + BlockPartSetHeader: psh.ToProto(), + BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), IsCommit: rs.Step == cstypes.RoundStepCommit, } - conR.Switch.Broadcast(StateChannel, MustEncode(csMsg)) + conR.Switch.Broadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: csMsg, + }) } // Broadcasts HasVoteMessage to peers that care. func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { - msg := &HasVoteMessage{ + msg := &tmcons.HasVote{ Height: vote.Height, Round: vote.Round, Type: vote.Type, Index: vote.ValidatorIndex, } - conR.Switch.Broadcast(StateChannel, MustEncode(msg)) + conR.Switch.Broadcast(p2p.Envelope{ + ChannelID: StateChannel, + Message: msg, + }) /* // TODO: Make this broadcast more selective. for _, peer := range conR.Switch.Peers().List() { @@ -463,7 +478,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { prs := ps.GetRoundState() if prs.Height == vote.Height { // TODO: Also filter on round? - peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg}) + e := p2p.Envelope{ + ChannelID: StateChannel, struct{ ConsensusMessage }{msg}, + Message: p, + } + peer.TrySend(e) } else { // Height doesn't match // TODO: check a field, maybe CatchupCommitRound? 
@@ -473,11 +492,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { */ } -func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { - nrsMsg = &NewRoundStepMessage{ +func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) { + nrsMsg = &tmcons.NewRoundStep{ Height: rs.Height, Round: rs.Round, - Step: rs.Step, + Step: uint32(rs.Step), SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.GetRound(), } @@ -487,7 +506,10 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.getRoundState() nrsMsg := makeRoundStepMessage(rs) - peer.Send(StateChannel, MustEncode(nrsMsg)) + peer.Send(p2p.Envelope{ + ChannelID: StateChannel, + Message: nrsMsg, + }) } func (conR *Reactor) updateRoundStateRoutine() { @@ -526,13 +548,19 @@ OUTER_LOOP: if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { part := rs.ProposalBlockParts.GetPart(index) - msg := &BlockPartMessage{ - Height: rs.Height, // This tells peer that this part applies to us. - Round: rs.Round, // This tells peer that this part applies to us. - Part: part, + parts, err := part.ToProto() + if err != nil { + panic(err) } logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, MustEncode(msg)) { + if peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: rs.Height, // This tells peer that this part applies to us. + Round: rs.Round, // This tells peer that this part applies to us. + Part: *parts, + }, + }) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -578,9 +606,11 @@ OUTER_LOOP: if rs.Proposal != nil && !prs.Proposal { // Proposal: share the proposal metadata with peer. 
{ - msg := &ProposalMessage{Proposal: rs.Proposal} logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, MustEncode(msg)) { + if peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()}, + }) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) } @@ -590,13 +620,15 @@ OUTER_LOOP: // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). if 0 <= rs.Proposal.POLRound { - msg := &ProposalPOLMessage{ - Height: rs.Height, - ProposalPOLRound: rs.Proposal.POLRound, - ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), - } logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(DataChannel, MustEncode(msg)) + peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.ProposalPOL{ + Height: rs.Height, + ProposalPolRound: rs.Proposal.POLRound, + ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), + }, + }) } continue OUTER_LOOP } @@ -633,13 +665,20 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt return } // Send the part - msg := &BlockPartMessage{ - Height: prs.Height, // Not our height, so it doesn't matter. - Round: prs.Round, // Not our height, so it doesn't matter. - Part: part, - } logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - if peer.Send(DataChannel, MustEncode(msg)) { + pp, err := part.ToProto() + if err != nil { + logger.Error("Could not convert part to proto", "index", index, "error", err) + return + } + if peer.Send(p2p.Envelope{ + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. 
+ Part: *pp, + }, + }) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } else { logger.Debug("Sending block part for catchup failed") @@ -798,12 +837,16 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + + peer.TrySend(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }, + }) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -815,12 +858,15 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrecommitType, - BlockID: maj23, - })) + peer.TrySend(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23.ToProto(), + }, + }) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -832,12 +878,16 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.ProposalPOLRound, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + + peer.TrySend(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.ProposalPOLRound, + Type: tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }, + }) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -852,12 +902,15 @@ OUTER_LOOP: if 
prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round, - Type: tmproto.PrecommitType, - BlockID: commit.BlockID, - })) + peer.TrySend(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: commit.Round, + Type: tmproto.PrecommitType, + BlockID: commit.BlockID.ToProto(), + }, + }) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -1071,9 +1124,13 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in // Returns true if vote was sent. func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { if vote, ok := ps.PickVoteToSend(votes); ok { - msg := &VoteMessage{vote} ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - if ps.peer.Send(VoteChannel, MustEncode(msg)) { + if ps.peer.Send(p2p.Envelope{ + ChannelID: VoteChannel, + Message: &tmcons.Vote{ + Vote: vote.ToProto(), + }, + }) { ps.SetHasVote(vote) return true } @@ -1439,15 +1496,6 @@ func init() { tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") } -func decodeMsg(bz []byte) (msg Message, err error) { - pb := &tmcons.Message{} - if err = proto.Unmarshal(bz, pb); err != nil { - return msg, err - } - - return MsgFromProto(pb) -} - //------------------------------------- // NewRoundStepMessage is sent for every step taken in the ConsensusState. 
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 0e79e37aa..47befb3e9 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -31,6 +31,7 @@ import ( mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" p2pmock "github.com/tendermint/tendermint/p2p/mock" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -267,15 +268,18 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] peer = p2pmock.NewPeer(nil) - msg = MustEncode(&HasVoteMessage{Height: 1, - Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) reactor.InitPeer(peer) // simulate switch calling Receive before AddPeer assert.NotPanics(t, func() { - reactor.Receive(StateChannel, peer, msg) + reactor.Receive(p2p.Envelope{ + ChannelID: StateChannel, + Src: peer, + Message: &tmcons.HasVote{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}, + }) reactor.AddPeer(peer) }) } @@ -290,15 +294,18 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] peer = p2pmock.NewPeer(nil) - msg = MustEncode(&HasVoteMessage{Height: 1, - Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) // we should call InitPeer here // simulate switch calling Receive before AddPeer assert.Panics(t, func() { - reactor.Receive(StateChannel, peer, msg) + reactor.Receive(p2p.Envelope{ + ChannelID: StateChannel, + Src: peer, + Message: &tmcons.HasVote{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}, + }) }) } diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 6b9594c60..4b21320e1 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -99,4 +99,4 @@ configuration file that we can update with PRs. 
Because the build processes are identical (as is the information contained herein), this file should be kept in sync as much as possible with its [counterpart in the Cosmos SDK -repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md). +repo](https://github.com/cosmos/cosmos-sdk/blob/main/docs/README.md). diff --git a/docs/architecture/README.md b/docs/architecture/README.md index ccf770f5c..66276d7c3 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -78,6 +78,7 @@ Note the context/background should be written in the present tense. - [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md) - [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md) - [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md) +- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md) - [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md) - [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md) - [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md) @@ -114,7 +115,6 @@ None - [ADR-064: Batch Verification](./adr-064-batch-verification.md) - [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md) - [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) -- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md) - [ADR-073: Adopt LibP2P](./adr-073-libp2p.md) - [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md) - [ADR-080: Reverse Sync](./adr-080-reverse-sync.md) diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md index 11fd70dae..e17226cce 100644 --- a/docs/architecture/adr-071-proposer-based-timestamps.md +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -61,7 +61,7 @@ The following protocols and application features require a reliable source of ti * Tendermint Light Clients [rely on correspondence between their known 
time](https://github.com/tendermint/tendermint/blob/main/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. * Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification). * Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime). -* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements) +* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.45/ibc/overview.html#acknowledgements) Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate. This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear). diff --git a/docs/qa/README.md b/docs/qa/README.md new file mode 100644 index 000000000..d322ccb6c --- /dev/null +++ b/docs/qa/README.md @@ -0,0 +1,23 @@ +--- +order: 1 +parent: + title: Tendermint Quality Assurance + description: This is a report on the process followed and results obtained when running v0.34.x on testnets + order: 2 +--- + +# Tendermint Quality Assurance + +This directory keeps track of the process followed by the Tendermint Core team +for Quality Assurance before cutting a release. +This directory is to live in multiple branches. On each release branch, +the contents of this directory reflect the status of the process +at the time the Quality Assurance process was applied for that release. 
+ +File [method](./method.md) keeps track of the process followed to obtain the results +used to decide if a release is passing the Quality Assurance process. +The results obtained in each release are stored in their own directory. +The following releases have undergone the Quality Assurance process: + +* [v0.34.x](./v034/), which was tested just before releasing v0.34.22 +* [v0.37.x](./v037/), with v0.34.x acting as a baseline diff --git a/docs/qa/method.md b/docs/qa/method.md new file mode 100644 index 000000000..cc4f82dfa --- /dev/null +++ b/docs/qa/method.md @@ -0,0 +1,214 @@ +--- +order: 1 +title: Method +--- + +# Method + +This document provides a detailed description of the QA process. +It is intended to be used by engineers reproducing the experimental setup for future tests of Tendermint. + +The (first iteration of the) QA process as described [in the RELEASES.md document][releases] +was applied to version v0.34.x in order to have a set of results acting as benchmarking baseline. +This baseline is then compared with results obtained in later versions. + +Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them: +_200 Node Test_, and _Rotating Nodes Test_. 
+ +[releases]: https://github.com/tendermint/tendermint/blob/v0.37.x/RELEASES.md#large-scale-testnets + +## Software Dependencies + +### Infrastructure Requirements to Run the Tests + +* An account at Digital Ocean (DO), with a high droplet limit (>202) +* The machine to orchestrate the tests should have the following installed: + * A clone of the [testnet repository][testnet-repo] + * This repository contains all the scripts mentioned in the reminder of this section + * [Digital Ocean CLI][doctl] + * [Terraform CLI][Terraform] + * [Ansible CLI][Ansible] + +[testnet-repo]: https://github.com/interchainio/tendermint-testnet +[Ansible]: https://docs.ansible.com/ansible/latest/index.html +[Terraform]: https://www.terraform.io/docs +[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/ + +### Requirements for Result Extraction + +* Matlab or Octave +* [Prometheus][prometheus] server installed +* blockstore DB of one of the full nodes in the testnet +* Prometheus DB + +[prometheus]: https://prometheus.io/ + +## 200 Node Testnet + +### Running the test + +This section explains how the tests were carried out for reproducibility purposes. + +1. [If you haven't done it before] + Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. +2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change) +3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested. +4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet + * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9) +5. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. + All nodes should be increasing their heights. +6. 
`ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node. + * Before running it, you need to edit the script to provide the IP address of a full node. + This node will receive all transactions from the load runner node. + * This script will take about 40 mins to run + * It is running 90-seconds-long experiments in a loop with different loads +7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine +8. Verify that the data was collected without errors + * at least one blockstore DB for a Tendermint validator + * the Prometheus database from the Prometheus node + * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) +9. **Run `make terraform-destroy`** + * Don't forget to type `yes`! Otherwise you're in trouble. + +### Result Extraction + +The method for extracting the results described here is highly manual (and exploratory) at this stage. +The Core team should improve it at every iteration to increase the amount of automation. + +#### Steps + +1. Unzip the blockstore into a directory +2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore + * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ > results/report.txt` + * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ --csv results/raw.csv` +3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate + * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, + copy its related lines to the filename that matches the number of connections. + * Sort the experiments in `report01.txt` in ascending tx rate order. 
Likewise for `report02.txt` and `report04.txt`. +4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side + * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. +5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment. + The format of the `.dat` files is amenable to loading them as matrices in Octave + + ```bash + uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }')) + c=1 + for i in 01 02 04; do + for j in 0025 0050 0100 0200; do + echo $i $j $c "${uuids[$c]}" + filename=c${i}_r${j} + grep ${uuids[$c]} raw.csv > ${filename}.csv + cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' > ${filename}.dat + c=$(expr $c + 1) + done + done + ``` + +6. Enter Octave +7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet + + ```octave + conns = { "01"; "02"; "04" }; + rates = { "0025"; "0050"; "0100"; "0200" }; + for i = 1:length(conns) + for j = 1:length(rates) + filename = strcat("c", conns{i}, "_r", rates{j}, ".dat"); + load("-ascii", filename); + endfor + endfor + ``` + +8. Set variable release to the current release undergoing QA + + ```octave + release = "v0.34.x"; + ``` + +9. Generate a plot with all (or some) experiments, where the X axis is the experiment time, + and the y axis is the latency of transactions. + The following snippet plots all experiments. 
+ + ```octave + legends = {}; + hold off; + for i = 1:length(conns) + for j = 1:length(rates) + data_name = strcat("c", conns{i}, "_r", rates{j}); + l = strcat("c=", conns{i}, " r=", rates{j}); + m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, "."); + hold on; + legends(1, end+1) = l; + endfor + endfor + legend(legends, "location", "northeastoutside"); + xlabel("experiment time (s)"); + ylabel("latency (s)"); + t = sprintf("200-node testnet - %s", release); + title(t); + ``` + +10. Consider adjusting the axis, in case you want to compare your results to the baseline, for instance + + ```octave + axis([0, 100, 0, 30], "tic"); + ``` + +11. Use Octave's GUI menu to save the plot (e.g. as `.png`) + +12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary. + +13. To generate a latency vs throughput plot, using the raw CSV file generated + in step 2, follow the instructions for the [`latency_throughput.py`] script. + +[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md + +#### Extracting Prometheus Metrics + +1. Stop the prometheus server if it is running as a service (e.g. a `systemd` unit). +2. Unzip the prometheus database retrieved from the testnet, and move it to replace the + local prometheus database. +3. Start the prometheus server and make sure no error logs appear at start up. +4. Introduce the metrics you want to gather or plot. + +## Rotating Node Testnet + +### Running the test + +This section explains how the tests were carried out for reproducibility purposes. + +1. [If you haven't done it before] + Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. +2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change) +3. Set variable `VERSION_TAG` to the git hash that is to be tested. +4. Run `make terraform-apply EPHEMERAL_SIZE=25` + * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests +5. 
Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet +6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. + All nodes should be increasing their heights. +7. On a different shell, + * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` + * `X` and `Y` should reflect a load below the saturation point (see, e.g., + [this paragraph](./v034/README.md#finding-the-saturation-point) for further info) +8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. + * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length + of the experiment. +9. When the height of the chain reaches 3000, stop the `make runload` script +10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice) + after height 3000 was reached, stop `make rotate` +11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine +12. Verify that the data was collected without errors + * at least one blockstore DB for a Tendermint validator + * the Prometheus database from the Prometheus node + * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) +13. **Run `make terraform-destroy`** + +Steps 8 to 10 are highly manual at the moment and will be improved in next iterations. + +### Result Extraction + +In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but: + +* The `results.txt` file contains only one experiment +* Therefore, no need for any `for` loops + +As for prometheus, the same method as for the 200 node experiment can be applied. 
diff --git a/docs/qa/v034/README.md b/docs/qa/v034/README.md new file mode 100644 index 000000000..b07b10291 --- /dev/null +++ b/docs/qa/v034/README.md @@ -0,0 +1,278 @@ +--- +order: 1 +parent: + title: Tendermint Quality Assurance Results for v0.34.x + description: This is a report on the results obtained when running v0.34.x on testnets + order: 2 +--- + +# v0.34.x + +## 200 Node Testnet + +### Finding the Saturation Point + +The first goal when examining the results of the tests is identifying the saturation point. +The saturation point is a setup with a transaction load big enough to prevent the testnet +from being stable: the load runner tries to produce slightly more transactions than can +be processed by the testnet. + +The following table summarizes the results for v0.34.x, for the different experiments +(extracted from file [`v034_report_tabbed.txt`](./img/v034_report_tabbed.txt)). + +The X axis of this table is `c`, the number of connections created by the load runner process to the target node. +The Y axis of this table is `r`, the rate or number of transactions issued per second. + +| | c=1 | c=2 | c=4 | +| :--- | ----: | ----: | ----: | +| r=25 | 2225 | 4450 | 8900 | +| r=50 | 4450 | 8900 | 17800 | +| r=100 | 8900 | 17800 | 35600 | +| r=200 | 17800 | 35600 | 38660 | + +The table shows the number of 1024-byte-long transactions that were produced by the load runner, +and processed by Tendermint, during the 90 seconds of the experiment's duration. +Each cell in the table refers to an experiment with a particular number of websocket connections (`c`) +to a chosen validator, and the number of transactions per second that the load runner +tries to produce (`r`). Note that the overall load that the tool attempts to generate is $c \cdot r$. 
+ +We can see that the saturation point is beyond the diagonal that spans cells + +* `r=200,c=2` +* `r=100,c=4` + +given that the total transactions should be close to the product of the rate, the number of connections, +and the experiment time (89 seconds, since the last batch never gets sent). + +All experiments below the saturation diagonal (`r=200,c=4`) have in common that the total +number of transactions processed is noticeably less than the product $c \cdot r \cdot 89$, +which is the expected number of transactions when the system is able to deal well with the +load. +With `r=200,c=4`, we obtained 38660 whereas the theoretical number of transactions should +have been $200 \cdot 4 \cdot 89 = 71200$. + +At this point, we chose an experiment at the limit of the saturation diagonal, +in order to further study the performance of this release. +**The chosen experiment is `r=200,c=2`**. + +This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for `r=200,c=2`, +where we can see that the load stays close to 0 most of the time. + +![load-load-runner](./img/v034_r200c2_load-runner.png) + +### Examining latencies + +The method described [here](../method.md) allows us to plot the latencies of transactions +for all experiments. + +![all-latencies](./img/v034_200node_latencies.png) + +As we can see, even the experiments beyond the saturation diagonal managed to keep +transaction latency stable (i.e. not constantly increasing). +Our interpretation for this is that contention within Tendermint was propagated, +via the websockets, to the load runner, +hence the load runner could not produce the target load, but a fraction of it. + +Further examination of the Prometheus data (see below), showed that the mempool contained many transactions +at steady state, but did not grow much without quickly returning to this steady state. 
This demonstrates +that the transactions were able to be processed by the Tendermint network at least as quickly as they +were submitted to the mempool. Finally, the test script made sure that, at the end of an experiment, the +mempool was empty so that all transactions submitted to the chain were processed. + +Finally, the number of points present in the plot appears to be much less than expected given the +number of transactions in each experiment, particularly close to or above the saturation diagonal. +This is a visual effect of the plot; what appear to be points in the plot are actually potentially huge +clusters of points. To corroborate this, we have zoomed in the plot above by setting (carefully chosen) +tiny axis intervals. The cluster shown below looks like a single point in the plot above. + +![all-latencies-zoomed](./img/v034_200node_latencies_zoomed.png) + +The plot of latencies can be used as a baseline to compare with other releases. + +The following plot summarizes average latencies versus overall throughputs +across different numbers of WebSocket connections to the node into which +transactions are being loaded. + +![latency-vs-throughput](./img/v034_latency_throughput.png) + +### Prometheus Metrics on the Chosen Experiment + +As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`. +This section further examines key metrics for this experiment extracted from Prometheus data. + +#### Mempool Size + +The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous +at all full nodes. It did not exhibit any unconstrained growth. +The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools +at a given time. +The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round +at some nodes. 
+ +![mempool-cumulative](./img/v034_r200c2_mempool_size.png) + +The plot below shows evolution of the average over all full nodes, which oscillates between 1500 and 2000 +outstanding transactions. + +![mempool-avg](./img/v034_r200c2_mempool_size_avg.png) + +The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below). + +#### Peers + +The number of peers was stable at all nodes. +It was higher for the seed nodes (around 140) than for the rest (between 21 and 74). +The fact that non-seed nodes reach more than 50 peers is due to #9548. + +![peers](./img/v034_r200c2_peers.png) + +#### Consensus Rounds per Height + +Most heights took just one round, but some nodes needed to advance to round 1 at some point. + +![rounds](./img/v034_r200c2_rounds.png) + +#### Blocks Produced per Minute, Transactions Processed per Minute + +The blocks produced per minute are the slope of this plot. + +![heights](./img/v034_r200c2_heights.png) + +Over a period of 2 minutes, the height goes from 530 to 569. +This results in an average of 19.5 blocks produced per minute. + +The transactions processed per minute are the slope of this plot. + +![total-txs](./img/v034_r200c2_total-txs.png) + +Over a period of 2 minutes, the total goes from 64525 to 100125 transactions, +resulting in 17800 transactions per minute. However, we can see in the plot that +all transactions in the load are processed long before the two minutes. +If we adjust the time window when transactions are processed (approx. 105 seconds), +we obtain 20343 transactions per minute. + +#### Memory Resident Set Size + +Resident Set Size of all monitored processes is plotted below. + +![rss](./img/v034_r200c2_rss.png) + +The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth. 
+ +![rss-avg](./img/v034_r200c2_rss_avg.png) + +#### CPU utilization + +The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`, +as it usually appears in the +[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). + +![load1](./img/v034_r200c2_load1.png) + +It is contained in most cases below 5, which is generally considered acceptable load. + +### Test Result + +**Result: N/A** (v0.34.x is the baseline) + +Date: 2022-10-14 + +Version: 3ec6e424d6ae4c96867c2dcf8310572156068bb6 + +## Rotating Node Testnet + +For this testnet, we will use a load that can safely be considered below the saturation +point for the size of this testnet (between 13 and 38 full nodes): `c=4,r=800`. + +N.B.: The version of Tendermint used for these tests is affected by #9539. +However, the reduced load that reaches the mempools is orthogonal to functionality +we are focusing on here. + +### Latencies + +The plot of all latencies can be seen in the following plot. + +![rotating-all-latencies](./img/v034_rotating_latencies.png) + +We can observe there are some very high latencies, towards the end of the test. +Upon suspicion that they are duplicate transactions, we examined the latencies +raw file and discovered there are more than 100K duplicate transactions. + +The following plot shows the latencies file where all duplicate transactions have +been removed, i.e., only the first occurrence of a duplicate transaction is kept. + +![rotating-all-latencies-uniq](./img/v034_rotating_latencies_uniq.png) + +This problem, existing in `v0.34.x`, will need to be addressed, perhaps in the same way +we addressed it when running the 200 node test with high loads: increasing the `cache_size` +configuration parameter. + +### Prometheus Metrics + +The set of metrics shown here are less than for the 200 node experiment. +We are only interested in those for which the catch-up process (blocksync) may have an impact. 
+
+#### Blocks and Transactions per minute
+
+Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot.
+
+![rotating-heights](./img/v034_rotating_heights.png)
+
+Over a period of 5229 seconds, the height goes from 2 to 3638.
+This results in an average of 41 blocks produced per minute.
+
+The following plot shows only the heights reported by ephemeral nodes
+(which are also included in the plot above). Note that the _height_ metric
+is only shown _once the node has switched to consensus_, hence the gaps
+when nodes are killed, wiped out, started from scratch, and catching up.
+
+![rotating-heights-ephe](./img/v034_rotating_heights_ephe.png)
+
+The transactions processed per minute are the gradient of this plot.
+
+![rotating-total-txs](./img/v034_rotating_total-txs.png)
+
+The small lines we see periodically close to `y=0` are the transactions that
+ephemeral nodes start processing when they are caught up.
+
+Over a period of 5229 seconds, the total goes from 0 to 387697 transactions,
+resulting in 4449 transactions per minute. We can see some abrupt changes in
+the plot's gradient. This will need to be investigated.
+
+#### Peers
+
+The plot below shows the evolution in peers throughout the experiment.
+The periodic changes observed are due to the ephemeral nodes being stopped,
+wiped out, and recreated.
+
+![rotating-peers](./img/v034_rotating_peers.png)
+
+The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes
+are mostly at the lower part.
+
+#### Memory Resident Set Size
+
+The average Resident Set Size (RSS) over all processes seems stable, and slightly growing toward the end.
+This might be related to the increase in transaction load observed above.
+
+![rotating-rss-avg](./img/v034_rotating_rss_avg.png)
+
+The memory taken by the validators and the ephemeral nodes (when they are up) is comparable.
+
+#### CPU utilization
+
+The plot shows metric `load1` for all nodes.
+ +![rotating-load1](./img/v034_rotating_load1.png) + +It is contained under 5 most of the time, which is considered normal load. +The purple line, which follows a different pattern is the validator receiving all +transactions, via RPC, from the load runner process. + +### Test Result + +**Result: N/A** + +Date: 2022-10-10 + +Version: a28c987f5a604ff66b515dd415270063e6fb069d diff --git a/docs/qa/v034/img/v034_200node_latencies.png b/docs/qa/v034/img/v034_200node_latencies.png new file mode 100644 index 000000000..afd1060ca Binary files /dev/null and b/docs/qa/v034/img/v034_200node_latencies.png differ diff --git a/docs/qa/v034/img/v034_200node_latencies_zoomed.png b/docs/qa/v034/img/v034_200node_latencies_zoomed.png new file mode 100644 index 000000000..1ff936442 Binary files /dev/null and b/docs/qa/v034/img/v034_200node_latencies_zoomed.png differ diff --git a/docs/qa/v034/img/v034_latency_throughput.png b/docs/qa/v034/img/v034_latency_throughput.png new file mode 100644 index 000000000..3674fe47b Binary files /dev/null and b/docs/qa/v034/img/v034_latency_throughput.png differ diff --git a/docs/qa/v034/img/v034_r200c2_heights.png b/docs/qa/v034/img/v034_r200c2_heights.png new file mode 100644 index 000000000..11f3bba43 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_heights.png differ diff --git a/docs/qa/v034/img/v034_r200c2_load-runner.png b/docs/qa/v034/img/v034_r200c2_load-runner.png new file mode 100644 index 000000000..70211b0d2 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_load-runner.png differ diff --git a/docs/qa/v034/img/v034_r200c2_load1.png b/docs/qa/v034/img/v034_r200c2_load1.png new file mode 100644 index 000000000..11012844d Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_load1.png differ diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size.png b/docs/qa/v034/img/v034_r200c2_mempool_size.png new file mode 100644 index 000000000..c5d690200 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_mempool_size.png 
differ diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png b/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png new file mode 100644 index 000000000..bda399fe5 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png differ diff --git a/docs/qa/v034/img/v034_r200c2_peers.png b/docs/qa/v034/img/v034_r200c2_peers.png new file mode 100644 index 000000000..a0aea7ada Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_peers.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rounds.png b/docs/qa/v034/img/v034_r200c2_rounds.png new file mode 100644 index 000000000..215be100d Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rounds.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rss.png b/docs/qa/v034/img/v034_r200c2_rss.png new file mode 100644 index 000000000..6d14dced0 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rss.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rss_avg.png b/docs/qa/v034/img/v034_r200c2_rss_avg.png new file mode 100644 index 000000000..8dec67da2 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rss_avg.png differ diff --git a/docs/qa/v034/img/v034_r200c2_total-txs.png b/docs/qa/v034/img/v034_r200c2_total-txs.png new file mode 100644 index 000000000..177d5f1c3 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_total-txs.png differ diff --git a/docs/qa/v034/img/v034_report_tabbed.txt b/docs/qa/v034/img/v034_report_tabbed.txt new file mode 100644 index 000000000..251495474 --- /dev/null +++ b/docs/qa/v034/img/v034_report_tabbed.txt @@ -0,0 +1,52 @@ +Experiment ID: 3d5cf4ef-1a1a-4b46-aa2d-da5643d2e81e │Experiment ID: 80e472ec-13a1-4772-a827-3b0c907fb51d │Experiment ID: 07aca6cf-c5a4-4696-988f-e3270fc6333b + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 25 │ Rate: 25 │ Rate: 25 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 2225 │ Total Valid Tx: 4450 │ Total Valid Tx: 8900 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ 
Total Negative Latencies: 0 + Minimum Latency: 599.404362ms │ Minimum Latency: 448.145181ms │ Minimum Latency: 412.485729ms + Maximum Latency: 3.539686885s │ Maximum Latency: 3.237392049s │ Maximum Latency: 12.026665368s + Average Latency: 1.441485349s │ Average Latency: 1.441267946s │ Average Latency: 2.150192457s + Standard Deviation: 541.049869ms │ Standard Deviation: 525.040007ms │ Standard Deviation: 2.233852478s + │ │ +Experiment ID: 953dc544-dd40-40e8-8712-20c34c3ce45e │Experiment ID: d31fc258-16e7-45cd-9dc8-13ab87bc0b0a │Experiment ID: 15d90a7e-b941-42f4-b411-2f15f857739e + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 50 │ Rate: 50 │ Rate: 50 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 4450 │ Total Valid Tx: 8900 │ Total Valid Tx: 17800 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 482.046942ms │ Minimum Latency: 435.458913ms │ Minimum Latency: 510.746448ms + Maximum Latency: 3.761483455s │ Maximum Latency: 7.175583584s │ Maximum Latency: 6.551497882s + Average Latency: 1.450408183s │ Average Latency: 1.681673116s │ Average Latency: 1.738083875s + Standard Deviation: 587.560056ms │ Standard Deviation: 1.147902047s │ Standard Deviation: 943.46522ms + │ │ +Experiment ID: 9a0b9980-9ce6-4db5-a80a-65ca70294b87 │Experiment ID: df8fa4f4-80af-4ded-8a28-356d15018b43 │Experiment ID: d0e41c2c-89c0-4f38-8e34-ca07adae593a + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 100 │ Rate: 100 │ Rate: 100 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 8900 │ Total Valid Tx: 17800 │ Total Valid Tx: 35600 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 477.417219ms │ Minimum Latency: 564.29247ms │ Minimum Latency: 840.71089ms + Maximum Latency: 6.63744785s │ Maximum Latency: 6.988553219s │ Maximum Latency: 9.555312398s + Average Latency: 1.561216103s │ Average Latency: 1.76419063s │ Average 
Latency: 3.200941683s + Standard Deviation: 1.011333552s │ Standard Deviation: 1.068459423s │ Standard Deviation: 1.732346601s + │ │ +Experiment ID: 493df3ee-4a36-4bce-80f8-6d65da66beda │Experiment ID: 13060525-f04f-46f6-8ade-286684b2fe50 │Experiment ID: 1777cbd2-8c96-42e4-9ec7-9b21f2225e4d + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 200 │ Rate: 200 │ Rate: 200 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 17800 │ Total Valid Tx: 35600 │ Total Valid Tx: 38660 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 493.705261ms │ Minimum Latency: 955.090573ms │ Minimum Latency: 1.9485821s + Maximum Latency: 7.440921872s │ Maximum Latency: 10.086673491s │ Maximum Latency: 17.73103976s + Average Latency: 1.875510582s │ Average Latency: 3.438130099s │ Average Latency: 8.143862237s + Standard Deviation: 1.304336995s │ Standard Deviation: 1.966391574s │ Standard Deviation: 3.943140002s + diff --git a/docs/qa/v034/img/v034_rotating_heights.png b/docs/qa/v034/img/v034_rotating_heights.png new file mode 100644 index 000000000..47913c282 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_heights.png differ diff --git a/docs/qa/v034/img/v034_rotating_heights_ephe.png b/docs/qa/v034/img/v034_rotating_heights_ephe.png new file mode 100644 index 000000000..981b93d6c Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_heights_ephe.png differ diff --git a/docs/qa/v034/img/v034_rotating_latencies.png b/docs/qa/v034/img/v034_rotating_latencies.png new file mode 100644 index 000000000..f0a54ed5b Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_latencies.png differ diff --git a/docs/qa/v034/img/v034_rotating_latencies_uniq.png b/docs/qa/v034/img/v034_rotating_latencies_uniq.png new file mode 100644 index 000000000..e5d694a16 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_latencies_uniq.png differ diff --git a/docs/qa/v034/img/v034_rotating_load1.png 
b/docs/qa/v034/img/v034_rotating_load1.png new file mode 100644 index 000000000..e9c385b85 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_load1.png differ diff --git a/docs/qa/v034/img/v034_rotating_peers.png b/docs/qa/v034/img/v034_rotating_peers.png new file mode 100644 index 000000000..ab5c8732d Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_peers.png differ diff --git a/docs/qa/v034/img/v034_rotating_rss_avg.png b/docs/qa/v034/img/v034_rotating_rss_avg.png new file mode 100644 index 000000000..9a4167320 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_rss_avg.png differ diff --git a/docs/qa/v034/img/v034_rotating_total-txs.png b/docs/qa/v034/img/v034_rotating_total-txs.png new file mode 100644 index 000000000..1ce5f47e9 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_total-txs.png differ diff --git a/docs/qa/v037/README.md b/docs/qa/v037/README.md new file mode 100644 index 000000000..680ffdaaa --- /dev/null +++ b/docs/qa/v037/README.md @@ -0,0 +1,326 @@ +--- +order: 1 +parent: + title: Tendermint Quality Assurance Results for v0.37.x + description: This is a report on the results obtained when running v0.37.x on testnets + order: 2 +--- + +# v0.37.x + +## Issues discovered + +During this iteration of the QA process, the following issues were found: + +* (critical, fixed) [\#9533] - This bug caused full nodes to sometimes get stuck + when blocksyncing, requiring a manual restart to unblock them. Importantly, + this bug was also present in v0.34.x and the fix was also backported in + [\#9534]. +* (critical, fixed) [\#9539] - `loadtime` is very likely to include more than + one "=" character in transactions, which is rejected by the e2e application. +* (critical, fixed) [\#9581] - Absent prometheus label makes Tendermint crash + when enabling Prometheus metric collection +* (non-critical, not fixed) [\#9548] - Full nodes can go over 50 connected + peers, which is not intended by the default configuration.
+* (non-critical, not fixed) [\#9537] - With the default mempool cache setting, + duplicated transactions are not rejected when gossipped and eventually flood + all mempools. The 200 node testnets were thus run with a value of 200000 (as + opposed to the default 10000) + +## 200 Node Testnet + +### Finding the Saturation Point + +The first goal is to identify the saturation point and compare it with the baseline (v0.34.x). +For further details, see [this paragraph](../v034/README.md#finding-the-saturation-point) +in the baseline version. + +The following table summarizes the results for v0.37.x, for the different experiments +(extracted from file [`v037_report_tabbed.txt`](./img/v037_report_tabbed.txt)). + +The X axis of this table is `c`, the number of connections created by the load runner process to the target node. +The Y axis of this table is `r`, the rate or number of transactions issued per second. + +| | c=1 | c=2 | c=4 | +| :--- | ----: | ----: | ----: | +| r=25 | 2225 | 4450 | 8900 | +| r=50 | 4450 | 8900 | 17800 | +| r=100 | 8900 | 17800 | 35600 | +| r=200 | 17800 | 35600 | 38660 | + +For comparison, this is the table with the baseline version. + +| | c=1 | c=2 | c=4 | +| :--- | ----: | ----: | ----: | +| r=25 | 2225 | 4450 | 8900 | +| r=50 | 4450 | 8900 | 17800 | +| r=100 | 8900 | 17800 | 35400 | +| r=200 | 17800 | 35600 | 37358 | + +The saturation point is beyond the diagonal: + +* `r=200,c=2` +* `r=100,c=4` + +which is at the same place as the baseline. For more details on the saturation point, see +[this paragraph](../v034/README.md#finding-the-saturation-point) in the baseline version. + +The experiment chosen to examine Prometheus metrics is the same as in the baseline: +**`r=200,c=2`**. + +The load runner's CPU load was negligible (near 0) when running `r=200,c=2`. + +### Examining latencies + +The method described [here](../method.md) allows us to plot the latencies of transactions +for all experiments. 
+
+![all-latencies](./img/v037_200node_latencies.png)
+
+The data seen in the plot is similar to that of the baseline.
+
+![all-latencies-bl](../v034/img/v034_200node_latencies.png)
+
+Therefore, for further details on these plots,
+see [this paragraph](../v034/README.md#examining-latencies) in the baseline version.
+
+The following plot summarizes average latencies versus overall throughputs
+across different numbers of WebSocket connections to the node into which
+transactions are being loaded.
+
+![latency-vs-throughput](./img/v037_latency_throughput.png)
+
+This is similar to that of the baseline plot:
+
+![latency-vs-throughput-bl](../v034/img/v034_latency_throughput.png)
+
+### Prometheus Metrics on the Chosen Experiment
+
+As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`.
+This section further examines key metrics for this experiment extracted from Prometheus data.
+
+#### Mempool Size
+
+The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous
+at all full nodes. It did not exhibit any unconstrained growth.
+The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools
+at a given time.
+
+![mempool-cumulative](./img/v037_r200c2_mempool_size.png)
+
+The plot below shows evolution of the average over all full nodes, which oscillates between 1500 and 2000 outstanding transactions.
+
+![mempool-avg](./img/v037_r200c2_mempool_size_avg.png)
+
+The peaks observed coincide with the moments when some nodes reached round 1 of consensus (see below).
+
+**These plots yield similar results to the baseline**:
+
+![mempool-cumulative-bl](../v034/img/v034_r200c2_mempool_size.png)
+
+![mempool-avg-bl](../v034/img/v034_r200c2_mempool_size_avg.png)
+
+#### Peers
+
+The number of peers was stable at all nodes.
+It was higher for the seed nodes (around 140) than for the rest (between 16 and 78).
+
+![peers](./img/v037_r200c2_peers.png)
+
+Just as in the baseline, the fact that non-seed nodes reach more than 50 peers is due to #9548.
+
+**This plot yields similar results to the baseline**:
+
+![peers-bl](../v034/img/v034_r200c2_peers.png)
+
+#### Consensus Rounds per Height
+
+Most heights took just one round, but some nodes needed to advance to round 1 at some point.
+
+![rounds](./img/v037_r200c2_rounds.png)
+
+**This plot yields slightly better results than the baseline**:
+
+![rounds-bl](../v034/img/v034_r200c2_rounds.png)
+
+#### Blocks Produced per Minute, Transactions Processed per Minute
+
+The blocks produced per minute are the gradient of this plot.
+
+![heights](./img/v037_r200c2_heights.png)
+
+Over a period of 2 minutes, the height goes from 477 to 524.
+This results in an average of 23.5 blocks produced per minute.
+
+The transactions processed per minute are the gradient of this plot.
+
+![total-txs](./img/v037_r200c2_total-txs.png)
+
+Over a period of 2 minutes, the total goes from 64525 to 100125 transactions,
+resulting in 17800 transactions per minute. However, we can see in the plot that
+all transactions in the load are processed long before the two minutes.
+If we adjust the time window when transactions are processed (approx. 90 seconds),
+we obtain 23733 transactions per minute.
+
+**These plots yield similar results to the baseline**:
+
+![heights-bl](../v034/img/v034_r200c2_heights.png)
+
+![total-txs](../v034/img/v034_r200c2_total-txs.png)
+
+#### Memory Resident Set Size
+
+Resident Set Size of all monitored processes is plotted below.
+
+![rss](./img/v037_r200c2_rss.png)
+
+The average over all processes oscillates around 380 MiB and does not demonstrate unconstrained growth.
+ +![rss-avg](./img/v037_r200c2_rss_avg.png) + +**These plots yield similar results to the baseline**: + +![rss-bl](../v034/img/v034_r200c2_rss.png) + +![rss-avg-bl](../v034/img/v034_r200c2_rss_avg.png) + +#### CPU utilization + +The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`, +as it usually appears in the +[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). + +![load1](./img/v037_r200c2_load1.png) + +It is contained below 5 on most nodes. + +**This plot yields similar results to the baseline**: + +![load1](../v034/img/v034_r200c2_load1.png) + +### Test Result + +**Result: PASS** + +Date: 2022-10-14 + +Version: 1cf9d8e276afe8595cba960b51cd056514965fd1 + +## Rotating Node Testnet + +We use the same load as in the baseline: `c=4,r=800`. + +Just as in the baseline tests, the version of Tendermint used for these tests is affected by #9539. +See this paragraph in the [baseline report](../v034/README.md#rotating-node-testnet) for further details. +Finally, note that this setup allows for a fairer comparison between this version and the baseline. + +### Latencies + +The plot of all latencies can be seen here. + +![rotating-all-latencies](./img/v037_rotating_latencies.png) + +Which is similar to the baseline. + +![rotating-all-latencies-bl](../v034/img/v034_rotating_latencies_uniq.png) + +Note that we are comparing against the baseline plot with _unique_ +transactions. This is because the problem with duplicate transactions +detected during the baseline experiment did not show up for `v0.37`, +which is _not_ proof that the problem is not present in `v0.37`. + +### Prometheus Metrics + +The set of metrics shown here match those shown on the baseline (`v0.34`) for the same experiment. +We also show the baseline results for comparison. + +#### Blocks and Transactions per minute + +The blocks produced per minute are the gradient of this plot. 
+ +![rotating-heights](./img/v037_rotating_heights.png) + +Over a period of 4446 seconds, the height goes from 5 to 3323. +This results in an average of 45 blocks produced per minute, +which is similar to the baseline, shown below. + +![rotating-heights-bl](../v034/img/v034_rotating_heights.png) + +The following two plots show only the heights reported by ephemeral nodes. +The second plot is the baseline plot for comparison. + +![rotating-heights-ephe](./img/v037_rotating_heights_ephe.png) + +![rotating-heights-ephe-bl](../v034/img/v034_rotating_heights_ephe.png) + +By the length of the segments, we can see that ephemeral nodes in `v0.37` +catch up slightly faster. + +The transactions processed per minute are the gradient of this plot. + +![rotating-total-txs](./img/v037_rotating_total-txs.png) + +Over a period of 3852 seconds, the total goes from 597 to 267298 transactions in one of the validators, +resulting in 4154 transactions per minute, which is slightly lower than the baseline, +although the baseline had to deal with duplicate transactions. + +For comparison, this is the baseline plot. + +![rotating-total-txs-bl](../v034/img/v034_rotating_total-txs.png) + +#### Peers + +The plot below shows the evolution of the number of peers throughout the experiment. + +![rotating-peers](./img/v037_rotating_peers.png) + +This is the baseline plot, for comparison. + +![rotating-peers-bl](../v034/img/v034_rotating_peers.png) + +The plotted values and their evolution are comparable in both plots. + +For further details on these plots, see the baseline report. + +#### Memory Resident Set Size + +The average Resident Set Size (RSS) over all processes looks slightly more stable +on `v0.37` (first plot) than on the baseline (second plot). 
+ +![rotating-rss-avg](./img/v037_rotating_rss_avg.png) + +![rotating-rss-avg-bl](../v034/img/v034_rotating_rss_avg.png) + +The memory taken by the validators and the ephemeral nodes when they are up is comparable (not shown in the plots), +just as observed in the baseline. + +#### CPU utilization + +The plot shows metric `load1` for all nodes. + +![rotating-load1](./img/v037_rotating_load1.png) + +This is the baseline plot. + +![rotating-load1-bl](../v034/img/v034_rotating_load1.png) + +In both cases, it is contained under 5 most of the time, which is considered normal load. +The green line in the `v0.37` plot and the purple line in the baseline plot (`v0.34`) +correspond to the validators receiving all transactions, via RPC, from the load runner process. +In both cases, they oscillate around 5 (normal load). The main difference is that other +nodes are generally less loaded in `v0.37`. + +### Test Result + +**Result: PASS** + +Date: 2022-10-10 + +Version: 155110007b9d8b83997a799016c1d0844c8efbaf + +[\#9533]: https://github.com/tendermint/tendermint/pull/9533 +[\#9534]: https://github.com/tendermint/tendermint/pull/9534 +[\#9539]: https://github.com/tendermint/tendermint/issues/9539 +[\#9548]: https://github.com/tendermint/tendermint/issues/9548 +[\#9537]: https://github.com/tendermint/tendermint/issues/9537 +[\#9581]: https://github.com/tendermint/tendermint/issues/9581 diff --git a/docs/qa/v037/img/v037_200node_latencies.png b/docs/qa/v037/img/v037_200node_latencies.png new file mode 100644 index 000000000..ad469bb29 Binary files /dev/null and b/docs/qa/v037/img/v037_200node_latencies.png differ diff --git a/docs/qa/v037/img/v037_latency_throughput.png b/docs/qa/v037/img/v037_latency_throughput.png new file mode 100644 index 000000000..baf34b2c7 Binary files /dev/null and b/docs/qa/v037/img/v037_latency_throughput.png differ diff --git a/docs/qa/v037/img/v037_r200c2_heights.png b/docs/qa/v037/img/v037_r200c2_heights.png new file mode 100644 index 
000000000..360283f14 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_heights.png differ diff --git a/docs/qa/v037/img/v037_r200c2_load1.png b/docs/qa/v037/img/v037_r200c2_load1.png new file mode 100644 index 000000000..11d6dfcf7 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_load1.png differ diff --git a/docs/qa/v037/img/v037_r200c2_mempool_size.png b/docs/qa/v037/img/v037_r200c2_mempool_size.png new file mode 100644 index 000000000..a2f3bd401 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_mempool_size.png differ diff --git a/docs/qa/v037/img/v037_r200c2_mempool_size_avg.png b/docs/qa/v037/img/v037_r200c2_mempool_size_avg.png new file mode 100644 index 000000000..480d4aebc Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_mempool_size_avg.png differ diff --git a/docs/qa/v037/img/v037_r200c2_peers.png b/docs/qa/v037/img/v037_r200c2_peers.png new file mode 100644 index 000000000..222da73f6 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_peers.png differ diff --git a/docs/qa/v037/img/v037_r200c2_rounds.png b/docs/qa/v037/img/v037_r200c2_rounds.png new file mode 100644 index 000000000..7afaaac57 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_rounds.png differ diff --git a/docs/qa/v037/img/v037_r200c2_rss.png b/docs/qa/v037/img/v037_r200c2_rss.png new file mode 100644 index 000000000..730a1bc49 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_rss.png differ diff --git a/docs/qa/v037/img/v037_r200c2_rss_avg.png b/docs/qa/v037/img/v037_r200c2_rss_avg.png new file mode 100644 index 000000000..3f6cf9f6d Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_rss_avg.png differ diff --git a/docs/qa/v037/img/v037_r200c2_total-txs.png b/docs/qa/v037/img/v037_r200c2_total-txs.png new file mode 100644 index 000000000..62dced2c8 Binary files /dev/null and b/docs/qa/v037/img/v037_r200c2_total-txs.png differ diff --git a/docs/qa/v037/img/v037_report_tabbed.txt b/docs/qa/v037/img/v037_report_tabbed.txt new file 
mode 100644 index 000000000..aa4aa4e60 --- /dev/null +++ b/docs/qa/v037/img/v037_report_tabbed.txt @@ -0,0 +1,52 @@ +Experiment ID: af129eae-7039-4c76-8c37-cff9ac636a84 │Experiment ID: 0f88bd33-9bf0-4197-8d1d-9a737c301ec6 │Experiment ID: 88227cad-2ba8-4eb6-b493-041d8120b46f + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 25 │ Rate: 25 │ Rate: 25 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 2225 │ Total Valid Tx: 4450 │ Total Valid Tx: 8900 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 506.248587ms │ Minimum Latency: 469.53452ms │ Minimum Latency: 588.900721ms + Maximum Latency: 3.032125789s │ Maximum Latency: 6.548830955s │ Maximum Latency: 6.533739843s + Average Latency: 1.427767726s │ Average Latency: 1.448582257s │ Average Latency: 1.717432341s + Standard Deviation: 524.11782ms │ Standard Deviation: 768.684133ms │ Standard Deviation: 1.000015768s + │ │ +Experiment ID: f03d39bd-0233-4b3c-b461-543445ae1d4b │Experiment ID: 46674f1c-e591-4e36-bb9b-f375c19fc475 │Experiment ID: 5385c159-8d4d-455b-bced-dcd4a3209988 + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 50 │ Rate: 50 │ Rate: 50 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 4450 │ Total Valid Tx: 8900 │ Total Valid Tx: 17800 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 477.46027ms │ Minimum Latency: 455.757111ms │ Minimum Latency: 594.749081ms + Maximum Latency: 2.483895394s │ Maximum Latency: 2.904715695s │ Maximum Latency: 9.294950389s + Average Latency: 1.407374662s │ Average Latency: 1.397385779s │ Average Latency: 2.621122536s + Standard Deviation: 505.150067ms │ Standard Deviation: 551.67603ms │ Standard Deviation: 1.772725794s + │ │ +Experiment ID: 9161b4a7-d75c-455f-b82d-2b5235d533cf │Experiment ID: 993a13a8-9db1-4b2b-9c20-71a5b85e4bbf │Experiment ID: ad1eb9e1-f4d6-41fd-9ba7-0f1f7dde1e3e + │ │ + Connections: 1 
│ Connections: 2 │ Connections: 4 + Rate: 100 │ Rate: 100 │ Rate: 100 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 8900 │ Total Valid Tx: 17800 │ Total Valid Tx: 35400 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 448.050467ms │ Minimum Latency: 605.436195ms │ Minimum Latency: 1.16816912s + Maximum Latency: 3.789711139s │ Maximum Latency: 7.292770222s │ Maximum Latency: 11.378681842s + Average Latency: 1.451342158s │ Average Latency: 2.07457999s │ Average Latency: 3.918384209s + Standard Deviation: 644.075973ms │ Standard Deviation: 1.230204022s │ Standard Deviation: 2.172400458s + │ │ +Experiment ID: 3cbe9c3d-9c43-4c9f-b5ca-b567d20bbd57 │Experiment ID: af836c5e-d9b6-4d5d-971c-2fc7f07aa2a0 │Experiment ID: 77606397-4989-41d4-b13b-f1f4d1af063f + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 200 │ Rate: 200 │ Rate: 200 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 17800 │ Total Valid Tx: 35600 │ Total Valid Tx: 37358 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 519.984701ms │ Minimum Latency: 820.755087ms │ Minimum Latency: 1.712574804s + Maximum Latency: 12.609056712s │ Maximum Latency: 9.260798095s │ Maximum Latency: 25.739223696s + Average Latency: 2.717853101s │ Average Latency: 3.477731881s │ Average Latency: 8.547725264s + Standard Deviation: 2.390778155s │ Standard Deviation: 1.675000913s │ Standard Deviation: 4.76961569s + diff --git a/docs/qa/v037/img/v037_rotating_heights.png b/docs/qa/v037/img/v037_rotating_heights.png new file mode 100644 index 000000000..882de51e4 Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_heights.png differ diff --git a/docs/qa/v037/img/v037_rotating_heights_ephe.png b/docs/qa/v037/img/v037_rotating_heights_ephe.png new file mode 100644 index 000000000..1ab2521e8 Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_heights_ephe.png differ 
diff --git a/docs/qa/v037/img/v037_rotating_latencies.png b/docs/qa/v037/img/v037_rotating_latencies.png new file mode 100644 index 000000000..94548c8b9 Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_latencies.png differ diff --git a/docs/qa/v037/img/v037_rotating_load1.png b/docs/qa/v037/img/v037_rotating_load1.png new file mode 100644 index 000000000..03b7412da Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_load1.png differ diff --git a/docs/qa/v037/img/v037_rotating_peers.png b/docs/qa/v037/img/v037_rotating_peers.png new file mode 100644 index 000000000..86304760b Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_peers.png differ diff --git a/docs/qa/v037/img/v037_rotating_rss_avg.png b/docs/qa/v037/img/v037_rotating_rss_avg.png new file mode 100644 index 000000000..d45c045b7 Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_rss_avg.png differ diff --git a/docs/qa/v037/img/v037_rotating_total-txs.png b/docs/qa/v037/img/v037_rotating_total-txs.png new file mode 100644 index 000000000..50b4c2e3f Binary files /dev/null and b/docs/qa/v037/img/v037_rotating_total-txs.png differ diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index 2a906e01b..ce20f3b6c 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -18,50 +18,52 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Tags** | **Description** | -|----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------| -| abci_connection_method_timing_seconds | Histogram | method, type | Timings for each of the ABCI methods | -| consensus_height | Gauge | | Height of the chain | -| consensus_validators | Gauge | | Number of validators | -| consensus_validators_power | Gauge | | Total voting power of all 
validators | -| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | | Number of rounds | -| consensus_num_txs | Gauge | | Number of transactions | -| consensus_total_txs | Gauge | | Total number of transactions committed | -| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | | /status sync_info number | -| consensus_block_syncing | gauge | | either 0 (not block syncing) or 1 (syncing) | -| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | | Block size in bytes | -| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol | -| consensus_round_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | -| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node | -| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | -| consensus_full_prevote_delay | Gauge | | 
Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | -| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start | -| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start | -| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | -| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. | -| p2p_peers | Gauge | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | -| mempool_failed_txs | counter | | number of failed transactions | -| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | -| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start | -| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start | +| **Name** | **Type** | **Tags** | 
**Description** | +|------------------------------------------|-----------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| `abci_connection_method_timing_seconds` | Histogram | `method`, `type` | Timings for each of the ABCI methods | +| `consensus_height` | Gauge | | Height of the chain | +| `consensus_validators` | Gauge | | Number of validators | +| `consensus_validators_power` | Gauge | | Total voting power of all validators | +| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set | +| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator | +| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| `consensus_missing_validators` | Gauge | | Number of validators who did not sign | +| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators | +| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign | +| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators | +| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| `consensus_rounds` | Gauge | | Number of rounds | +| `consensus_num_txs` | Gauge | | Number of transactions | +| `consensus_total_txs` | Gauge | | Total number of transactions committed | +| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer | +| `consensus_latest_block_height` | Gauge | | /status sync\_info number | +| `consensus_block_syncing` | Gauge | | Either 0 (not block syncing) or 1 (syncing) | +| `consensus_state_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) | +| `consensus_block_size_bytes` | Gauge | | Block size in bytes | +| 
`consensus_step_duration` | Histogram | `step` | Histogram of durations for each step in the consensus protocol | +| `consensus_round_duration` | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | +| `consensus_block_gossip_parts_received` | Counter | `matches_current` | Number of block parts received by the node | +| `consensus_quorum_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | +| `consensus_full_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| `consensus_proposal_receive_count` | Counter | `status` | Total number of proposals received by the node since process start | +| `consensus_proposal_create_count` | Counter | | Total number of proposals created by the node since process start | +| `consensus_round_voting_power_percent` | Gauge | `vote_type` | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| `consensus_late_votes` | Counter | `vote_type` | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. 
| +| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type | +| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type | +| `p2p_peers` | Gauge | | Number of peers node's connected to | +| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer | +| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer | +| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer | +| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id | +| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer | +| `mempool_size` | Gauge | | Number of uncommitted transactions | +| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes | +| `mempool_failed_txs` | Counter | | Number of failed transactions | +| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool | +| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms | +| `state_consensus_param_updates` | Counter | | Number of consensus parameter updates returned by the application since process start | +| `state_validator_set_updates` | Counter | | Number of validator set updates returned by the application since process start | ## Useful queries diff --git a/evidence/reactor.go b/evidence/reactor.go index 2a136dbfb..88357e25a 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/cosmos/gogoproto/proto" clist "github.com/tendermint/tendermint/libs/clist" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -55,6 +56,7 @@ func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ID: EvidenceChannel, Priority: 6, 
RecvMessageCapacity: maxMsgSize, + MessageType: &tmproto.EvidenceList{}, }, } } @@ -66,11 +68,11 @@ func (evR *Reactor) AddPeer(peer p2p.Peer) { // Receive implements Reactor. // It adds any received evidence to the evpool. -func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - evis, err := decodeMsg(msgBytes) +func (evR *Reactor) Receive(e p2p.Envelope) { + evis, err := evidenceListFromProto(e.Message) if err != nil { - evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - evR.Switch.StopPeerForError(src, err) + evR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err) + evR.Switch.StopPeerForError(e.Src, err) return } @@ -80,7 +82,7 @@ func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *types.ErrInvalidEvidence: evR.Logger.Error(err.Error()) // punish peer - evR.Switch.StopPeerForError(src, err) + evR.Switch.StopPeerForError(e.Src, err) return case nil: default: @@ -126,11 +128,15 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) { evis := evR.prepareEvidenceMessage(peer, ev) if len(evis) > 0 { evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer) - msgBytes, err := encodeMsg(evis) + evp, err := evidenceListToProto(evis) if err != nil { panic(err) } - success := peer.Send(EvidenceChannel, msgBytes) + + success := peer.Send(p2p.Envelope{ + ChannelID: EvidenceChannel, + Message: evp, + }) if !success { time.Sleep(peerRetryMessageIntervalMS * time.Millisecond) continue @@ -210,7 +216,7 @@ type PeerState interface { // encodemsg takes a array of evidence // returns the byte encoding of the List Message -func encodeMsg(evis []types.Evidence) ([]byte, error) { +func evidenceListToProto(evis []types.Evidence) (*tmproto.EvidenceList, error) { evi := make([]tmproto.Evidence, len(evis)) for i := 0; i < len(evis); i++ { ev, err := types.EvidenceToProto(evis[i]) @@ -222,19 +228,13 @@ func encodeMsg(evis []types.Evidence) ([]byte, error) { 
epl := tmproto.EvidenceList{ Evidence: evi, } - - return epl.Marshal() + return &epl, nil } -// decodemsg takes an array of bytes -// returns an array of evidence -func decodeMsg(bz []byte) (evis []types.Evidence, err error) { - lm := tmproto.EvidenceList{} - if err := lm.Unmarshal(bz); err != nil { - return nil, err - } +func evidenceListFromProto(m proto.Message) ([]types.Evidence, error) { + lm := m.(*tmproto.EvidenceList) - evis = make([]types.Evidence, len(lm.Evidence)) + evis := make([]types.Evidence, len(lm.Evidence)) for i := 0; i < len(lm.Evidence); i++ { ev, err := types.EvidenceFromProto(&lm.Evidence[i]) if err != nil { diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index a2d82bf71..0d7d1110d 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -208,7 +208,10 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { // i.e. broadcastEvidenceRoutine finishes when peer is stopped defer leaktest.CheckTimeout(t, 10*time.Second)() - p.On("Send", evidence.EvidenceChannel, mock.AnythingOfType("[]uint8")).Return(false) + p.On("Send", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == evidence.EvidenceChannel + })).Return(false) quitChan := make(<-chan struct{}) p.On("Quit").Return(quitChan) ps := peerState{2} diff --git a/go.mod b/go.mod index e3022a8b6..9394dea91 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/tendermint/tendermint go 1.18 require ( - github.com/BurntSushi/toml v1.2.0 + github.com/BurntSushi/toml v1.2.1 github.com/adlio/schema v1.3.3 github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/fortytw2/leaktest v1.3.0 @@ -11,7 +11,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.5.1 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.49.0 + github.com/golangci/golangci-lint v1.50.1 github.com/google/orderedcode v0.0.1 github.com/gorilla/websocket v1.5.0 
github.com/informalsystems/tm-load-test v1.0.0 @@ -21,42 +21,43 @@ require ( github.com/ory/dockertest v3.3.5+incompatible github.com/pkg/errors v0.9.1 github.com/pointlander/peg v1.0.1 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.37.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rs/cors v1.8.2 github.com/sasha-s/go-deadlock v0.3.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.5.0 - github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.0 + github.com/spf13/cobra v1.6.1 + github.com/spf13/viper v1.14.0 + github.com/stretchr/testify v1.8.1 github.com/tendermint/tm-db v0.6.6 - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa - golang.org/x/net v0.0.0-20220812174116-3211cb980234 - google.golang.org/grpc v1.50.0 + golang.org/x/crypto v0.2.0 + golang.org/x/net v0.2.0 + google.golang.org/grpc v1.50.1 ) require ( - github.com/bufbuild/buf v1.8.0 + github.com/bufbuild/buf v1.9.0 github.com/creachadair/taskgroup v0.3.2 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 ) require ( - github.com/btcsuite/btcd/btcec/v2 v2.2.1 + github.com/btcsuite/btcd/btcec/v2 v2.3.1 github.com/btcsuite/btcd/btcutil v1.1.2 github.com/cosmos/gogoproto v1.4.2 - github.com/gofrs/uuid v4.3.0+incompatible + github.com/gofrs/uuid v4.3.1+incompatible github.com/google/uuid v1.3.0 github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae - github.com/vektra/mockery/v2 v2.14.0 + github.com/vektra/mockery/v2 v2.14.1 gonum.org/v1/gonum v0.12.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 ) require ( 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Abirdcfly/dupword v0.0.7 // indirect github.com/Antonboom/errname v0.1.7 // indirect 
github.com/Antonboom/nilnil v0.1.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -64,9 +65,9 @@ require ( github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/OpenPeeDeeP/depguard v1.1.0 // indirect + github.com/OpenPeeDeeP/depguard v1.1.1 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.3.0 // indirect @@ -77,7 +78,8 @@ require ( github.com/bombsimon/wsl/v3 v3.3.0 // indirect github.com/breml/bidichk v0.2.3 // indirect github.com/breml/errchkjson v0.3.0 // indirect - github.com/bufbuild/connect-go v0.4.0 // indirect + github.com/bufbuild/connect-go v1.0.0 // indirect + github.com/bufbuild/protocompile v0.1.0 // indirect github.com/butuzov/ireturn v0.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -87,8 +89,8 @@ require ( github.com/containerd/continuity v0.3.0 // indirect github.com/containerd/typeurl v1.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/curioswitch/go-reassign v0.1.2 // indirect - github.com/daixiang0/gci v0.6.3 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.8.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect @@ -96,24 +98,24 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/docker/distribution 
v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.17+incompatible // indirect + github.com/docker/docker v20.10.19+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-chi/chi/v5 v5.0.7 // indirect - github.com/go-critic/go-critic v0.6.4 // indirect + github.com/go-critic/go-critic v0.6.5 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect - github.com/go-toolsmith/astcopy v1.0.1 // indirect - github.com/go-toolsmith/astequal v1.0.2 // indirect + github.com/go-toolsmith/astcopy v1.0.2 // indirect + github.com/go-toolsmith/astequal v1.0.3 // indirect github.com/go-toolsmith/astfmt v1.0.0 // indirect github.com/go-toolsmith/astp v1.0.0 // indirect github.com/go-toolsmith/strparse v1.0.0 // indirect @@ -127,14 +129,14 @@ require ( github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect github.com/golangci/misspell v0.3.5 // indirect 
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/btree v1.0.0 // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect @@ -149,15 +151,14 @@ require ( github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jgautheron/goconst v1.5.1 // indirect - github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d // indirect - github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect github.com/kisielk/errcheck v1.6.2 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.15.9 // indirect + github.com/kkHAIKE/contextcheck v1.1.3 // indirect + github.com/klauspost/compress v1.15.11 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.6 // indirect @@ -167,6 +168,7 @@ require ( github.com/leonklingele/grouper v1.1.0 // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.6 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.0 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -174,20 +176,20 @@ require ( github.com/mattn/go-runewidth v0.0.9 // indirect 
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.2.3 // indirect + github.com/mgechev/revive v1.2.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/buildkit v0.10.3 // indirect + github.com/moby/buildkit v0.10.4 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/moricho/tparallel v0.2.1 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.8.1 // indirect + github.com/nishanths/exhaustive v0.8.3 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect github.com/opencontainers/runc v1.1.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect @@ -198,10 +200,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect - github.com/polyfloyd/go-errorlint v1.0.2 // indirect + github.com/polyfloyd/go-errorlint v1.0.5 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/quasilyte/go-ruleguard v0.3.17 // indirect - github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect + github.com/quasilyte/go-ruleguard v0.3.18 // indirect + github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect github.com/quasilyte/regex/syntax 
v0.0.0-20200407221936-30656e2c4a95 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rs/zerolog v1.27.0 // indirect @@ -210,7 +212,7 @@ require ( github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.13.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect github.com/satori/go.uuid v1.2.0 // indirect github.com/securego/gosec/v2 v2.13.1 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect @@ -220,22 +222,21 @@ require ( github.com/sivchari/tenv v1.7.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect - github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/afero v1.9.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - github.com/sylvia7788/contextcheck v1.0.6 // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/timonwong/logrlint v0.1.0 // indirect - github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect + github.com/timonwong/loggercheck v0.9.3 // indirect + github.com/tomarrell/wrapcheck/v2 v2.7.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/ultraware/funlen v0.0.3 // indirect 
github.com/ultraware/whitespace v0.0.5 // indirect github.com/uudashr/gocognit v1.0.6 // indirect @@ -244,26 +245,27 @@ require ( gitlab.com/bosi/decorder v0.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0 // indirect - go.opentelemetry.io/otel v1.9.0 // indirect - go.opentelemetry.io/otel/trace v1.9.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect + go.opentelemetry.io/otel v1.11.0 // indirect + go.opentelemetry.io/otel/metric v0.32.3 // indirect + go.opentelemetry.io/otel/trace v1.11.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.22.0 // indirect + go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect - golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect - golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.12 // indirect - google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect + golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect + golang.org/x/mod v0.6.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/term v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + golang.org/x/tools v0.2.0 // indirect + google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.3.3 // indirect - mvdan.cc/gofumpt v0.3.1 // indirect + 
mvdan.cc/gofumpt v0.4.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect diff --git a/go.sum b/go.sum index 0a7b98bea..731ffe3d2 100644 --- a/go.sum +++ b/go.sum @@ -23,14 +23,15 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= +cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -45,6 +46,8 @@ cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q= +github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk= github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= @@ -53,8 +56,8 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= @@ -72,14 +75,14 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver 
v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= -github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= +github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= @@ -149,8 +152,8 @@ github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod 
h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= -github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= +github.com/btcsuite/btcd/btcec/v2 v2.3.1 h1:v8tFffXRNpwFPbeQhkYPrOXOvVrwD5QIe66Jkz3db14= +github.com/btcsuite/btcd/btcec/v2 v2.3.1/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= @@ -169,10 +172,12 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.8.0 h1:53qJ3QY/KOHwSjWgCQYkQaR3jGWst7aOfTXnFe8e+VQ= -github.com/bufbuild/buf v1.8.0/go.mod h1:tBzKkd1fzCcBV6KKSO7zo3rlhk3o1YQ0F2tQKSC2aNU= -github.com/bufbuild/connect-go v0.4.0 h1:fIMyUYG8mXSTH+nnlOx9KmRUf3mBF0R2uKK+BQBoOHE= -github.com/bufbuild/connect-go v0.4.0/go.mod h1:ZEtBnQ7J/m7bvWOW+H8T/+hKQCzPVfhhhICuvtcnjlI= +github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8= +github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE= +github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo= +github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= +github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= +github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= 
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= @@ -249,13 +254,13 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cristalhq/acmd v0.7.0/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= -github.com/curioswitch/go-reassign v0.1.2 h1:ekM07+z+VFT560Exz4mTv0/s1yU9gem6CJc/tlYpkmI= -github.com/curioswitch/go-reassign v0.1.2/go.mod h1:bFJIHgtTM3hRm2sKXSPkbwNjSFyGURQXyn4IXD2qwfQ= +github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.6.3 h1:wUAqXChk8HbwXn8AfxD9DYSCp9Bpz1L3e6Q4Roe+q9E= -github.com/daixiang0/gci v0.6.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= +github.com/daixiang0/gci v0.8.1 h1:T4xpSC+hmsi4CSyuYfIJdMZAr9o7xZmHpQVygMghGZ4= +github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ 
-279,12 +284,13 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64= +github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -331,15 +337,15 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8= github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-critic/go-critic v0.6.4 h1:tucuG1pvOyYgpBIrVxw0R6gwO42lNa92Aq3VaDoIs+E= -github.com/go-critic/go-critic v0.6.4/go.mod h1:qL5SOlk7NtY6sJPoVCTKDIgzNOxHkkkOCVDyi9wJe1U= +github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo= +github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -368,13 +374,12 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= 
-github.com/go-toolsmith/astcopy v1.0.1 h1:l09oBhAPyV74kLJ3ZO31iBU8htZGTwr9LTjuMCyL8go= -github.com/go-toolsmith/astcopy v1.0.1/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= +github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0= +github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= -github.com/go-toolsmith/astequal v1.0.2 h1:+XvaV8zNxua+9+Oa4AHmgmpo4RYAbwr/qjNppLfX2yM= github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= @@ -394,8 +399,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= -github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod 
h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -451,10 +456,10 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.49.0 h1:I8WHOavragDttlLHtSraHn/h39C+R60bEQ5NoGcHQr8= -github.com/golangci/golangci-lint v1.49.0/go.mod h1:+V/7lLv449R6w9mQ3WdV0EKh7Je/jTylMeSwBZcLeWE= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.50.1 h1:C829clMcZXEORakZlwpk7M4iDw2XiwxxKaG504SZ9zY= +github.com/golangci/golangci-lint v1.50.1/go.mod h1:AQjHBopYS//oB8xs0y0M/dtxdKHkdhl0RvmjUct0/4w= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -480,8 +485,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -614,14 +620,7 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d h1:1BLWxsvcb5w9/vGjtyEo//r3dwEPNg7z73nbQ/XV4/s= -github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= -github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b h1:izTof8BKh/nE1wrKOrloNA5q4odOarjf+Xpe+4qow98= -github.com/jhump/protoreflect 
v1.12.1-0.20220721211354-060cc04fc18b/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -656,11 +655,13 @@ github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7 github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw= +github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -704,6 +705,8 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= @@ -737,8 +740,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/revive v1.2.3 h1:NzIEEa9+WimQ6q2Ov7OcNeySS/IOcwtkQ8RAh0R5UJ4= -github.com/mgechev/revive v1.2.3/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= +github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= +github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -761,8 +764,8 @@ github.com/mitchellh/mapstructure 
v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.10.3 h1:/dGykD8FW+H4p++q5+KqKEo6gAkYKyBQHdawdjVwVAU= -github.com/moby/buildkit v0.10.3/go.mod h1:jxeOuly98l9gWHai0Ojrbnczrk/rf+o9/JqNhY+UCSo= +github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4= +github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= @@ -798,8 +801,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo= -github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= +github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= +github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -831,8 +834,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= @@ -896,8 +899,8 @@ github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 h1:RHHRCZeaNy github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4/go.mod h1:RdR1j20Aj5pB6+fw6Y9Ur7lMHpegTEjY1vc19hEZL40= github.com/pointlander/peg v1.0.1 h1:mgA/GQE8TeS9MdkU6Xn6iEzBmQUQCNuWD7rHCK6Mjs0= github.com/pointlander/peg v1.0.1/go.mod h1:5hsGDQR2oZI4QoWz0/Kdg3VSVEC31iJw/b7WjqCBGRI= -github.com/polyfloyd/go-errorlint v1.0.2 h1:kp1yvHflYhTmw5m3MmBy8SCyQkKPjwDthVuMH0ug6Yk= -github.com/polyfloyd/go-errorlint v1.0.2/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= +github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY= +github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= github.com/posener/complete v1.1.1/go.mod 
h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -910,15 +913,16 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= 
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -945,14 +949,14 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.17 h1:cDdoaSbQg11LXPDQqiCK54QmQXsEQQCTIgdcpeULGSI= -github.com/quasilyte/go-ruleguard v0.3.17/go.mod h1:sST5PvaR7yb/Az5ksX8oc88usJ4EGjmJv7cK7y3jyig= +github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= +github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod 
h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= @@ -964,7 +968,7 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= @@ -988,8 +992,8 @@ github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71e github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.13.0 h1:uObNudVEEHf6JbOJy5bgKJloA1bWjxR9fwgNFpPzKnI= -github.com/sashamelentyev/usestdlibvars v1.13.0/go.mod h1:D2Wb7niIYmTB+gB8z7kh8tyP5ccof1dQ+SFk+WW5NtY= +github.com/sashamelentyev/usestdlibvars v1.20.0 h1:K6CXjqqtSYSsuyRDDC7Sjn6vTMLiSJa4ZmDkiokoqtw= +github.com/sashamelentyev/usestdlibvars v1.20.0/go.mod 
h1:0GaP+ecfZMXShS0A94CJn6aEuPRILv8h/VuWI9n1ygg= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1031,8 +1035,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= -github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -1042,8 +1046,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1056,8 +1060,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= -github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= +github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= @@ -1069,8 +1073,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.1.4/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1079,13 +1084,12 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/sylvia7788/contextcheck v1.0.6 h1:o2EZgVPyMKE/Mtoqym61DInKEjwEbsmyoxg3VrmjNO4= -github.com/sylvia7788/contextcheck v1.0.6/go.mod h1:9XDxwvxyuKD+8N+a7Gs7bfWLityh5t70g/GjdEt2N2M= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -1106,14 +1110,14 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose 
v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/timonwong/logrlint v0.1.0 h1:phZCcypL/vtx6cGxObJgWZ5wexZF5SXFPLOM+ru0e/M= -github.com/timonwong/logrlint v0.1.0/go.mod h1:Zleg4Gw+kRxNej+Ra7o+tEaW5k1qthTaYKU7rSD39LU= +github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI= +github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M= -github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= -github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= -github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tomarrell/wrapcheck/v2 v2.7.0 h1:J/F8DbSKJC83bAvC6FoZaRjZiZ/iKoueSdrEkmGeacA= +github.com/tomarrell/wrapcheck/v2 v2.7.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -1125,8 +1129,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb 
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs= -github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= +github.com/vektra/mockery/v2 v2.14.1 h1:Xamr4zUkFBDGdZhJ6iCiJ1AwkGRmUgZd8zkwjRXt+TU= +github.com/vektra/mockery/v2 v2.14.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= @@ -1167,12 +1171,14 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0 h1:PNEMW4EvpNQ7SuoPFNkvbZqi1STkTPKq+8vfoMl/6AE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0/go.mod h1:fk1+icoN47ytLSgkoWHLJrtVTSQ+HgmkNgPTKrk/Nsc= -go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= -go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= -go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= -go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 
h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= +go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= +go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c= +go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc= +go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= +go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1194,8 +1200,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1219,8 +1225,8 @@ golang.org/x/crypto 
v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= +golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1237,8 +1243,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image 
v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1267,8 +1273,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1327,8 +1334,8 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod 
h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1343,7 +1350,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1356,8 +1363,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde 
h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1452,19 +1459,19 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U= -golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 h1:Q5284mrmYTpACcm+eAKjKJH48BBwSyfJqmmGDTtT8Vc= -golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1473,15 +1480,16 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1574,8 +1582,9 @@ golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlz golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1665,8 +1674,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b h1:SfSkJugek6xm7lWywqth4r2iTrYLpD8lOj1nMIIhMNM= -google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1696,8 +1705,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1710,10 +1719,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1761,8 +1769,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -mvdan.cc/gofumpt v0.3.1 
h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= diff --git a/libs/rand/random.go b/libs/rand/random.go index 05af30f1a..7d2410ee9 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -48,7 +48,7 @@ func (r *Rand) init() { } func (r *Rand) reset(seed int64) { - r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec + r.rand = mrand.New(mrand.NewSource(seed)) } //---------------------------------------- diff --git a/light/proxy/routes.go b/light/proxy/routes.go index c97a91dfd..d7a427095 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -21,22 +21,22 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "health": rpcserver.NewRPCFunc(makeHealthFunc(c), ""), "status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""), "net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""), - "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"), - "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""), - "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""), - "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"), - "header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"), - "header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"), - "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"), - "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"), - "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"), - "tx": 
rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"), + "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()), + "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()), + "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()), + "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")), + "header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", rpcserver.Cacheable("height")), + "header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash", rpcserver.Cacheable()), + "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()), + "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")), + "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")), + "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()), "tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"), "block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"), - "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"), + "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")), "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""), "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""), - "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"), + "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")), "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"), "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""), @@ -47,7 +47,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { // abci API 
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"), - "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""), + "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()), // evidence API "broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"), diff --git a/mempool/v0/reactor.go b/mempool/v0/reactor.go index 3fc850641..30f1bc50d 100644 --- a/mempool/v0/reactor.go +++ b/mempool/v0/reactor.go @@ -134,6 +134,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ID: mempool.MempoolChannel, Priority: 5, RecvMessageCapacity: batchMsg.Size(), + MessageType: &protomem.Message{}, }, } } @@ -154,27 +155,34 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. -func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := memR.decodeMsg(msgBytes) - if err != nil { - memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - memR.Switch.StopPeerForError(src, err) - return - } - memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - - txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)} - if src != nil { - txInfo.SenderP2PID = src.ID() - } - - for _, tx := range msg.Txs { - err = memR.mempool.CheckTx(tx, nil, txInfo) - if errors.Is(err, mempool.ErrTxInCache) { - memR.Logger.Debug("Tx already exists in cache", "tx", tx.String()) - } else if err != nil { - memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err) +func (memR *Reactor) Receive(e p2p.Envelope) { + memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + switch msg := e.Message.(type) { + case *protomem.Txs: + protoTxs := msg.GetTxs() + if len(protoTxs) == 0 { + memR.Logger.Error("received empty txs from peer", "src", e.Src) + return } + txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)} + if 
e.Src != nil { + txInfo.SenderP2PID = e.Src.ID() + } + + var err error + for _, tx := range protoTxs { + ntx := types.Tx(tx) + err = memR.mempool.CheckTx(ntx, nil, txInfo) + if errors.Is(err, mempool.ErrTxInCache) { + memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) + } else if err != nil { + memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err) + } + } + default: + memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) + return } // broadcasting happens from go routines per peer @@ -234,18 +242,10 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // https://github.com/tendermint/tendermint/issues/5796 if _, ok := memTx.senders.Load(peerID); !ok { - msg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}}, - }, - } - - bz, err := msg.Marshal() - if err != nil { - panic(err) - } - - success := peer.Send(mempool.MempoolChannel, bz) + success := peer.Send(p2p.Envelope{ + ChannelID: mempool.MempoolChannel, + Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, + }) if !success { time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue @@ -264,35 +264,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } -func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) { - msg := protomem.Message{} - err := msg.Unmarshal(bz) - if err != nil { - return TxsMessage{}, err - } - - var message TxsMessage - - if i, ok := msg.Sum.(*protomem.Message_Txs); ok { - txs := i.Txs.GetTxs() - - if len(txs) == 0 { - return message, errors.New("empty TxsMessage") - } - - decoded := make([]types.Tx, len(txs)) - for j, tx := range txs { - decoded[j] = types.Tx(tx) - } - - message = TxsMessage{ - Txs: decoded, - } - return message, nil - } - return message, fmt.Errorf("msg type: %T is not supported", msg) -} - // TxsMessage is a 
Message containing transactions. type TxsMessage struct { Txs []types.Tx diff --git a/mempool/v0/reactor_test.go b/mempool/v0/reactor_test.go index 8501db5b1..3e4ef9074 100644 --- a/mempool/v0/reactor_test.go +++ b/mempool/v0/reactor_test.go @@ -267,6 +267,10 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { }) } +// TODO: This test tests that we don't panic and are able to generate new +// PeerIDs for each peer we add. It seems as though we should be able to test +// this in a much more direct way. +// https://github.com/tendermint/tendermint/issues/9639 func TestDontExhaustMaxActiveIDs(t *testing.T) { config := cfg.TestConfig() const N = 1 @@ -282,7 +286,12 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { for i := 0; i < mempool.MaxActiveIDs+1; i++ { peer := mock.NewPeer(nil) - reactor.Receive(mempool.MempoolChannel, peer, []byte{0x1, 0x2, 0x3}) + reactor.Receive(p2p.Envelope{ + ChannelID: mempool.MempoolChannel, + Src: peer, + Message: &memproto.Message{}, // This uses the wrong message type on purpose to stop the peer as in an error state in the reactor. + }, + ) reactor.AddPeer(peer) } } diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go index 4da51bab8..58218bf71 100644 --- a/mempool/v1/reactor.go +++ b/mempool/v1/reactor.go @@ -133,6 +133,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ID: mempool.MempoolChannel, Priority: 5, RecvMessageCapacity: batchMsg.Size(), + MessageType: &protomem.Message{}, }, } } @@ -153,27 +154,36 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. 
-func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := memR.decodeMsg(msgBytes)
-	if err != nil {
-		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
-		memR.Switch.StopPeerForError(src, err)
+func (memR *Reactor) Receive(e p2p.Envelope) {
+	memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
+	switch msg := e.Message.(type) {
+	case *protomem.Txs:
+		protoTxs := msg.GetTxs()
+		if len(protoTxs) == 0 {
+			memR.Logger.Error("received empty txs from peer", "src", e.Src)
+			return
+		}
+		txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
+		if e.Src != nil {
+			txInfo.SenderP2PID = e.Src.ID()
+		}
+
+		var err error
+		for _, tx := range protoTxs {
+			ntx := types.Tx(tx)
+			err = memR.mempool.CheckTx(ntx, nil, txInfo)
+			if errors.Is(err, mempool.ErrTxInCache) {
+				memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
+			} else if err != nil {
+				memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
+			}
+		}
+	default:
+		memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
+		memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
 		return
 	}
-	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
-	txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
-	if src != nil {
-		txInfo.SenderP2PID = src.ID()
-	}
-	for _, tx := range msg.Txs {
-		err = memR.mempool.CheckTx(tx, nil, txInfo)
-		if err == mempool.ErrTxInCache {
-			memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
-		} else if err != nil {
-			memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
-		}
-	}

 	// broadcasting happens from go routines per peer
 }

@@ -233,18 +243,10 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
 			// NOTE: Transaction batching was disabled due to
 			// https://github.com/tendermint/tendermint/issues/5796
 			if !memTx.HasPeer(peerID) {
- msg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}}, - }, - } - - bz, err := msg.Marshal() - if err != nil { - panic(err) - } - - success := peer.Send(mempool.MempoolChannel, bz) + success := peer.Send(p2p.Envelope{ + ChannelID: mempool.MempoolChannel, + Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, + }) if !success { time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue @@ -268,37 +270,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { //----------------------------------------------------------------------------- // Messages -func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) { - msg := protomem.Message{} - err := msg.Unmarshal(bz) - if err != nil { - return TxsMessage{}, err - } - - var message TxsMessage - - if i, ok := msg.Sum.(*protomem.Message_Txs); ok { - txs := i.Txs.GetTxs() - - if len(txs) == 0 { - return message, errors.New("empty TxsMessage") - } - - decoded := make([]types.Tx, len(txs)) - for j, tx := range txs { - decoded[j] = types.Tx(tx) - } - - message = TxsMessage{ - Txs: decoded, - } - return message, nil - } - return message, fmt.Errorf("msg type: %T is not supported", msg) -} - -//------------------------------------- - // TxsMessage is a Message containing transactions. 
type TxsMessage struct { Txs []types.Tx diff --git a/node/id.go b/node/id.go deleted file mode 100644 index ffa162f81..000000000 --- a/node/id.go +++ /dev/null @@ -1,35 +0,0 @@ -package node - -import ( - "time" - - "github.com/tendermint/tendermint/crypto" -) - -type ID struct { - Name string - PubKey crypto.PubKey -} - -type PrivNodeID struct { - ID - PrivKey crypto.PrivKey -} - -type Greeting struct { - ID - Version string - ChainID string - Message string - Time time.Time -} - -type SignedNodeGreeting struct { - Greeting - Signature []byte -} - -func (pnid *PrivNodeID) SignGreeting() *SignedNodeGreeting { - // greeting := NodeGreeting{} - return nil -} diff --git a/node/node.go b/node/node.go index 067ae39a2..252715223 100644 --- a/node/node.go +++ b/node/node.go @@ -1,49 +1,34 @@ package node import ( - "bytes" "context" - "errors" "fmt" "net" "net/http" - "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" - dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" bc "github.com/tendermint/tendermint/blocksync" cfg "github.com/tendermint/tendermint/config" cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/light" mempl "github.com/tendermint/tendermint/mempool" - mempoolv0 "github.com/tendermint/tendermint/mempool/v0" - mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" rpccore "github.com/tendermint/tendermint/rpc/core" grpccore 
"github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" - blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" - blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" - "github.com/tendermint/tendermint/state/indexer/sink/psql" "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/statesync" "github.com/tendermint/tendermint/store" @@ -51,94 +36,54 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - _ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port - - _ "github.com/lib/pq" // provide the psql db driver + _ "net/http/pprof" //nolint: gosec ) -//------------------------------------------------------------------------------ +// Node is the highest level interface to a full Tendermint node. +// It includes all configuration information and running services. +type Node struct { + service.BaseService -// DBContext specifies config information for loading a new DB. -type DBContext struct { - ID string - Config *cfg.Config -} + // config + config *cfg.Config + genesisDoc *types.GenesisDoc // initial validator set + privValidator types.PrivValidator // local node's validator key -// DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (dbm.DB, error) + // network + transport *p2p.MultiplexTransport + sw *p2p.Switch // p2p connections + addrBook pex.AddrBook // known peers + nodeInfo p2p.NodeInfo + nodeKey *p2p.NodeKey // our node privkey + isListening bool -const readHeaderTimeout = 10 * time.Second - -// DefaultDBProvider returns a database using the DBBackend and DBDir -// specified in the ctx.Config. 
-func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { - dbType := dbm.BackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) -} - -// GenesisDocProvider returns a GenesisDoc. -// It allows the GenesisDoc to be pulled from sources other than the -// filesystem, for instance from a distributed key-value store cluster. -type GenesisDocProvider func() (*types.GenesisDoc, error) - -// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads -// the GenesisDoc from the config.GenesisFile() on the filesystem. -func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { - return func() (*types.GenesisDoc, error) { - return types.GenesisDocFromFile(config.GenesisFile()) - } -} - -// Provider takes a config and a logger and returns a ready to go Node. -type Provider func(*cfg.Config, log.Logger) (*Node, error) - -// DefaultNewNode returns a Tendermint node with default settings for the -// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. -// It implements NodeProvider. -func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { - nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) - if err != nil { - return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) - } - - return NewNode(config, - privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), - nodeKey, - proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), - DefaultGenesisDocProviderFunc(config), - DefaultDBProvider, - DefaultMetricsProvider(config.Instrumentation), - logger, - ) -} - -// MetricsProvider returns a consensus, p2p and mempool Metrics. -type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics) - -// DefaultMetricsProvider returns Metrics build using Prometheus client library -// if Prometheus is enabled. Otherwise, it returns no-op Metrics. 
-func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics) { - if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), - mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(config.Namespace, "chain_id", chainID), - proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID) - } - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics() - } + // services + eventBus *types.EventBus // pub/sub for services + stateStore sm.Store + blockStore *store.BlockStore // store the blockchain to disk + bcReactor p2p.Reactor // for block-syncing + mempoolReactor p2p.Reactor // for gossipping transactions + mempool mempl.Mempool + stateSync bool // whether the node should state sync on startup + stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots + stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node + stateSyncGenesis sm.State // provides the genesis state for state sync + consensusState *cs.State // latest consensus state + consensusReactor *cs.Reactor // for participating in the consensus + pexReactor *pex.Reactor // for exchanging peer addresses + evidencePool *evidence.Pool // tracking evidence + proxyApp proxy.AppConns // connection to the application + rpcListeners []net.Listener // rpc servers + txIndexer txindex.TxIndexer + blockIndexer indexer.BlockIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server + pprofSrv *http.Server } // Option sets a parameter for the node. type Option func(*Node) -// Temporary interface for switching to block sync, we should get rid of v0 and v1 reactors. 
-// See: https://github.com/tendermint/tendermint/issues/4595 -type blockSyncReactor interface { - SwitchToBlockSync(sm.State) error -} - // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to // the node's Switch. // @@ -190,517 +135,6 @@ func StateProvider(stateProvider statesync.StateProvider) Option { //------------------------------------------------------------------------------ -// Node is the highest level interface to a full Tendermint node. -// It includes all configuration information and running services. -type Node struct { - service.BaseService - - // config - config *cfg.Config - genesisDoc *types.GenesisDoc // initial validator set - privValidator types.PrivValidator // local node's validator key - - // network - transport *p2p.MultiplexTransport - sw *p2p.Switch // p2p connections - addrBook pex.AddrBook // known peers - nodeInfo p2p.NodeInfo - nodeKey *p2p.NodeKey // our node privkey - isListening bool - - // services - eventBus *types.EventBus // pub/sub for services - stateStore sm.Store - blockStore *store.BlockStore // store the blockchain to disk - bcReactor p2p.Reactor // for block-syncing - mempoolReactor p2p.Reactor // for gossipping transactions - mempool mempl.Mempool - stateSync bool // whether the node should state sync on startup - stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node - stateSyncGenesis sm.State // provides the genesis state for state sync - consensusState *cs.State // latest consensus state - consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application - rpcListeners []net.Listener // rpc servers - txIndexer txindex.TxIndexer - blockIndexer indexer.BlockIndexer - indexerService *txindex.IndexerService 
- prometheusSrv *http.Server -} - -func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { - var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) - if err != nil { - return - } - blockStore = store.NewBlockStore(blockStoreDB) - - stateDB, err = dbProvider(&DBContext{"state", config}) - if err != nil { - return - } - - return -} - -func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator, metrics) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return nil, fmt.Errorf("error starting proxy app connections: %v", err) - } - return proxyApp, nil -} - -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return nil, err - } - return eventBus, nil -} - -func createAndStartIndexerService( - config *cfg.Config, - chainID string, - dbProvider DBProvider, - eventBus *types.EventBus, - logger log.Logger, -) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { - var ( - txIndexer txindex.TxIndexer - blockIndexer indexer.BlockIndexer - ) - - switch config.TxIndex.Indexer { - case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) - if err != nil { - return nil, nil, nil, err - } - - txIndexer = kv.NewTxIndex(store) - blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) - - case "psql": - if config.TxIndex.PsqlConn == "" { - return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`) - } - es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID) - if err != nil { - return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err) - } - txIndexer = es.TxIndexer() - 
blockIndexer = es.BlockIndexer() - - default: - txIndexer = &null.TxIndex{} - blockIndexer = &blockidxnull.BlockerIndexer{} - } - - indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) - indexerService.SetLogger(logger.With("module", "txindex")) - - if err := indexerService.Start(); err != nil { - return nil, nil, nil, err - } - - return indexerService, txIndexer, blockIndexer, nil -} - -func doHandshake( - stateStore sm.Store, - state sm.State, - blockStore sm.BlockStore, - genDoc *types.GenesisDoc, - eventBus types.BlockEventPublisher, - proxyApp proxy.AppConns, - consensusLogger log.Logger, -) error { - handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) - } - return nil -} - -func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { - // Log the version info. - logger.Info("Version info", - "tendermint_version", version.TMCoreSemVer, - "abci", version.ABCISemVer, - "block", version.BlockProtocol, - "p2p", version.P2PProtocol, - "commit_hash", version.TMGitCommitHash, - ) - - // If the state and software differ in block version, at least log it. 
- if state.Version.Consensus.Block != version.BlockProtocol { - logger.Info("Software and state have different block protocols", - "software", version.BlockProtocol, - "state", state.Version.Consensus.Block, - ) - } - - addr := pubKey.Address() - // Log whether this node is a validator or an observer - if state.Validators.HasAddress(addr) { - consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey) - } else { - consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) - } -} - -func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { - if state.Validators.Size() > 1 { - return false - } - addr, _ := state.Validators.GetByIndex(0) - return bytes.Equal(pubKey.Address(), addr) -} - -func createMempoolAndMempoolReactor( - config *cfg.Config, - proxyApp proxy.AppConns, - state sm.State, - memplMetrics *mempl.Metrics, - logger log.Logger, -) (mempl.Mempool, p2p.Reactor) { - switch config.Mempool.Version { - case cfg.MempoolV1: - mp := mempoolv1.NewTxMempool( - logger, - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - config.Mempool, - mp, - ) - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return mp, reactor - - case cfg.MempoolV0: - mp := mempoolv0.NewCListMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) - - reactor := mempoolv0.NewReactor( - config.Mempool, - mp, - ) - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return mp, reactor - - default: - return nil, nil - } -} - -func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, blockStore 
*store.BlockStore, logger log.Logger, -) (*evidence.Reactor, *evidence.Pool, error) { - evidenceDB, err := dbProvider(&DBContext{"evidence", config}) - if err != nil { - return nil, nil, err - } - evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{ - DiscardFinalizeBlockResponses: config.Storage.DiscardFinalizeBlockResponses, - }), blockStore) - if err != nil { - return nil, nil, err - } - evidenceReactor := evidence.NewReactor(evidencePool) - evidenceReactor.SetLogger(evidenceLogger) - return evidenceReactor, evidencePool, nil -} - -func createBlocksyncReactor(config *cfg.Config, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore *store.BlockStore, - blockSync bool, - logger log.Logger, -) (bcReactor p2p.Reactor, err error) { - switch config.BlockSync.Version { - case "v0": - bcReactor = bc.NewReactor(state.Copy(), blockExec, blockStore, blockSync) - case "v1", "v2": - return nil, fmt.Errorf("block sync version %s has been deprecated. 
Please use v0", config.BlockSync.Version) - default: - return nil, fmt.Errorf("unknown fastsync version %s", config.BlockSync.Version) - } - - bcReactor.SetLogger(logger.With("module", "blocksync")) - return bcReactor, nil -} - -func createConsensusReactor(config *cfg.Config, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore sm.BlockStore, - mempool mempl.Mempool, - evidencePool *evidence.Pool, - privValidator types.PrivValidator, - csMetrics *cs.Metrics, - waitSync bool, - eventBus *types.EventBus, - consensusLogger log.Logger, -) (*cs.Reactor, *cs.State) { - consensusState := cs.NewState( - config.Consensus, - state.Copy(), - blockExec, - blockStore, - mempool, - evidencePool, - cs.StateMetrics(csMetrics), - ) - consensusState.SetLogger(consensusLogger) - if privValidator != nil { - consensusState.SetPrivValidator(privValidator) - } - consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) - consensusReactor.SetLogger(consensusLogger) - // services which will be publishing and/or subscribing for messages (events) - // consensusReactor will set it on consensusState and blockExecutor - consensusReactor.SetEventBus(eventBus) - return consensusReactor, consensusState -} - -func createTransport( - config *cfg.Config, - nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, - proxyApp proxy.AppConns, -) ( - *p2p.MultiplexTransport, - []p2p.PeerFilterFunc, -) { - var ( - mConnConfig = p2p.MConnConfig(config.P2P) - transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) - connFilters = []p2p.ConnFilterFunc{} - peerFilters = []p2p.PeerFilterFunc{} - ) - - if !config.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) - } - - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - if config.FilterPeers { - connFilters = append( - connFilters, - // ABCI query for address filtering. 
- func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - - peerFilters = append( - peerFilters, - // ABCI query for ID filtering. - func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - } - - p2p.MultiplexTransportConnFilters(connFilters...)(transport) - - // Limit the number of incoming connections. - max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - p2p.MultiplexTransportMaxIncomingConnections(max)(transport) - - return transport, peerFilters -} - -func createSwitch(config *cfg.Config, - transport p2p.Transport, - p2pMetrics *p2p.Metrics, - peerFilters []p2p.PeerFilterFunc, - mempoolReactor p2p.Reactor, - bcReactor p2p.Reactor, - stateSyncReactor *statesync.Reactor, - consensusReactor *cs.Reactor, - evidenceReactor *evidence.Reactor, - nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, - p2pLogger log.Logger, -) *p2p.Switch { - sw := p2p.NewSwitch( - config.P2P, - transport, - p2p.WithMetrics(p2pMetrics), - p2p.SwitchPeerFilters(peerFilters...), - ) - sw.SetLogger(p2pLogger) - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKSYNC", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) - sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) - - sw.SetNodeInfo(nodeInfo) - sw.SetNodeKey(nodeKey) - - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) - return sw -} - -func 
createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey *p2p.NodeKey, -) (pex.AddrBook, error) { - addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) - - // Add ourselves to addrbook to prevent dialing ourselves - if config.P2P.ExternalAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - if config.P2P.ListenAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - - sw.SetAddrBook(addrBook) - - return addrBook, nil -} - -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, - sw *p2p.Switch, logger log.Logger, -) *pex.Reactor { - // TODO persistent peers ? so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, - &pex.ReactorConfig{ - Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), - SeedMode: config.P2P.SeedMode, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, - }) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) - return pexReactor -} - -// startStateSync starts an asynchronous state sync process, then switches to block sync mode. 
-func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor, - stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, blockSync bool, - stateStore sm.Store, blockStore *store.BlockStore, state sm.State, -) error { - ssR.Logger.Info("Starting state sync") - - if stateProvider == nil { - var err error - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - stateProvider, err = statesync.NewLightClientStateProvider( - ctx, - state.ChainID, state.Version, state.InitialHeight, - config.RPCServers, light.TrustOptions{ - Period: config.TrustPeriod, - Height: config.TrustHeight, - Hash: config.TrustHashBytes(), - }, ssR.Logger.With("module", "light")) - if err != nil { - return fmt.Errorf("failed to set up light client state provider: %w", err) - } - } - - go func() { - state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) - if err != nil { - ssR.Logger.Error("State sync failed", "err", err) - return - } - err = stateStore.Bootstrap(state) - if err != nil { - ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) - return - } - err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) - if err != nil { - ssR.Logger.Error("Failed to store last seen commit", "err", err) - return - } - - if blockSync { - // FIXME Very ugly to have these metrics bleed through here. - conR.Metrics.StateSyncing.Set(0) - conR.Metrics.BlockSyncing.Set(1) - err = bcR.SwitchToBlockSync(state) - if err != nil { - ssR.Logger.Error("Failed to switch to block sync", "err", err) - return - } - } else { - conR.SwitchToConsensus(state, true) - } - }() - return nil -} - // NewNode returns a new, ready to go, Tendermint Node. 
func NewNode(config *cfg.Config, privValidator types.PrivValidator, @@ -726,7 +160,7 @@ func NewNode(config *cfg.Config, return nil, err } - csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics := metricsProvider(genDoc.ChainID) + csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID) // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics) @@ -798,7 +232,7 @@ func NewNode(config *cfg.Config, mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) // Make Evidence Reactor - evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) + evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger) if err != nil { return nil, err } @@ -815,18 +249,12 @@ func NewNode(config *cfg.Config, ) // Make BlocksyncReactor. Don't start block sync if we're doing a state sync first. - bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger) + bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics) if err != nil { return nil, fmt.Errorf("could not create blocksync reactor: %w", err) } - // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. - // FIXME We need to update metrics here, since other reactors don't have access to them. 
- if stateSync { - csMetrics.StateSyncing.Set(1) - } else if blockSync { - csMetrics.BlockSyncing.Set(1) - } + // Make ConsensusReactor consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, @@ -841,6 +269,7 @@ func NewNode(config *cfg.Config, proxyApp.Snapshot(), proxyApp.Query(), config.StateSync.TempDir, + ssMetrics, ) stateSyncReactor.SetLogger(logger.With("module", "statesync")) @@ -874,6 +303,17 @@ func NewNode(config *cfg.Config, return nil, fmt.Errorf("could not create addrbook: %w", err) } + for _, addr := range splitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { + netAddrs, err := p2p.NewNetAddressString(addr) + if err != nil { + return nil, fmt.Errorf("invalid bootstrap peer address: %w", err) + } + err = addrBook.AddAddress(netAddrs, netAddrs) + if err != nil { + return nil, fmt.Errorf("adding bootstrap address to addressbook: %w", err) + } + } + // Optionally, start the pex reactor // // TODO: @@ -891,12 +331,8 @@ func NewNode(config *cfg.Config, pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } - if config.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) - }() - } + // Add private IDs to addrbook to block those peers being added + addrBook.AddPrivateIDs(splitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ")) node := &Node{ config: config, @@ -945,8 +381,15 @@ func (n *Node) OnStart() error { time.Sleep(genTime.Sub(now)) } - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) + // run pprof server if it is enabled + if n.config.RPC.IsPprofEnabled() { + n.pprofSrv = n.startPprofServer() + } + + // begin prometheus 
metrics gathering if it is enabled + if n.config.Instrumentation.IsPrometheusEnabled() { + n.prometheusSrv = n.startPrometheusServer() + } // Start the RPC server before the P2P server // so we can eg. receive txs for the first block @@ -958,11 +401,6 @@ func (n *Node) OnStart() error { n.rpcListeners = listeners } - if n.config.Instrumentation.Prometheus && - n.config.Instrumentation.PrometheusListenAddr != "" { - n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) - } - // Start the transport. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) if err != nil { @@ -1047,6 +485,11 @@ func (n *Node) OnStop() { n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) } } + if n.pprofSrv != nil { + if err := n.pprofSrv.Shutdown(context.Background()); err != nil { + n.Logger.Error("Pprof HTTP server Shutdown", "err", err) + } + } if n.blockStore != nil { if err := n.blockStore.Close(); err != nil { n.Logger.Error("problem closing blockstore", "err", err) @@ -1215,9 +658,9 @@ func (n *Node) startRPC() ([]net.Listener, error) { // startPrometheusServer starts a Prometheus HTTP server, listening for metrics // collectors on addr. 
-func (n *Node) startPrometheusServer(addr string) *http.Server {
+func (n *Node) startPrometheusServer() *http.Server {
 	srv := &http.Server{
-		Addr: addr,
+		Addr: n.config.Instrumentation.PrometheusListenAddr,
 		Handler: promhttp.InstrumentMetricHandler(
 			prometheus.DefaultRegisterer, promhttp.HandlerFor(
 				prometheus.DefaultGatherer,
@@ -1235,6 +678,22 @@ func (n *Node) startPrometheusServer(addr string) *http.Server {
 	return srv
 }

+// startPprofServer starts an HTTP server serving pprof profiling endpoints.
+func (n *Node) startPprofServer() *http.Server {
+	srv := &http.Server{
+		Addr:              n.config.RPC.PprofListenAddress,
+		Handler:           nil,
+		ReadHeaderTimeout: readHeaderTimeout,
+	}
+	go func() {
+		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
+			// Error starting or closing listener:
+			n.Logger.Error("pprof HTTP server ListenAndServe", "err", err)
+		}
+	}()
+	return srv
+}
+
 // Switch returns the Node's Switch.
 func (n *Node) Switch() *p2p.Switch {
 	return n.sw
@@ -1368,123 +827,3 @@ func makeNodeInfo(
 	err := nodeInfo.Validate()
 	return nodeInfo, err
 }
-
-//------------------------------------------------------------------------------
-
-var genesisDocKey = []byte("genesisDoc")
-
-// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
-// database, or creates one using the given genesisDocProvider. On success this also
-// returns the genesis doc loaded through the given provider.
-func LoadStateFromDBOrGenesisDocProvider(
-	stateDB dbm.DB,
-	genesisDocProvider GenesisDocProvider,
-) (sm.State, *types.GenesisDoc, error) {
-	// Get genesis doc
-	genDoc, err := loadGenesisDoc(stateDB)
-	if err != nil {
-		genDoc, err = genesisDocProvider()
-		if err != nil {
-			return sm.State{}, nil, err
-		}
-
-		err = genDoc.ValidateAndComplete()
-		if err != nil {
-			return sm.State{}, nil, fmt.Errorf("error in genesis doc: %w", err)
-		}
-		// save genesis doc to prevent a certain class of user errors (e.g. when it
-		// was changed, accidentally or not). Also good for audit trail.
- if err := saveGenesisDoc(stateDB, genDoc); err != nil { - return sm.State{}, nil, err - } - } - stateStore := sm.NewStore(stateDB, sm.StoreOptions{ - DiscardFinalizeBlockResponses: false, - }) - state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) - if err != nil { - return sm.State{}, nil, err - } - return state, genDoc, nil -} - -// panics if failed to unmarshal bytes -func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { - b, err := db.Get(genesisDocKey) - if err != nil { - panic(err) - } - if len(b) == 0 { - return nil, errors.New("genesis doc not found") - } - var genDoc *types.GenesisDoc - err = tmjson.Unmarshal(b, &genDoc) - if err != nil { - panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) - } - return genDoc, nil -} - -// panics if failed to marshal the given genesis document -func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { - b, err := tmjson.Marshal(genDoc) - if err != nil { - return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) - } - if err := db.SetSync(genesisDocKey, b); err != nil { - return err - } - - return nil -} - -func createAndStartPrivValidatorSocketClient( - listenAddr, - chainID string, - logger log.Logger, -) (types.PrivValidator, error) { - pve, err := privval.NewSignerListener(listenAddr, logger) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - pvsc, err := privval.NewSignerClient(pve, chainID) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey() - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - - const ( - retries = 50 // 50 * 100ms = 5s total - timeout = 100 * time.Millisecond - ) - pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) - - return pvscWithRetries, nil -} - -// splitAndTrimEmpty slices s into all 
subslices separated by sep and returns a -// slice of the string s with all leading and trailing Unicode code points -// contained in cutset removed. If sep is empty, SplitAndTrim splits after each -// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of -// -1. also filter out empty strings, only return non-empty strings. -func splitAndTrimEmpty(s, sep, cutset string) []string { - if s == "" { - return []string{} - } - - spl := strings.Split(s, sep) - nonEmptyStrings := make([]string, 0, len(spl)) - for i := 0; i < len(spl); i++ { - element := strings.Trim(spl[i], cutset) - if element != "" { - nonEmptyStrings = append(nonEmptyStrings, element) - } - } - return nonEmptyStrings -} diff --git a/node/node_test.go b/node/node_test.go index bb0508929..9f389a9fd 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "net/http" "os" "syscall" "testing" @@ -135,6 +136,29 @@ func TestNodeSetAppVersion(t *testing.T) { assert.Equal(t, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App, appVersion) } +func TestPprofServer(t *testing.T) { + config := test.ResetTestRoot("node_pprof_test") + defer os.RemoveAll(config.RootDir) + config.RPC.PprofListenAddress = testFreeAddr(t) + + // should not work yet + _, err := http.Get("http://" + config.RPC.PprofListenAddress) //nolint: bodyclose + assert.Error(t, err) + + n, err := DefaultNewNode(config, log.TestingLogger()) + assert.NoError(t, err) + assert.NoError(t, n.Start()) + defer func() { + require.NoError(t, n.Stop()) + }() + assert.NotNil(t, n.pprofSrv) + + resp, err := http.Get("http://" + config.RPC.PprofListenAddress + "/debug/pprof") + assert.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, 200, resp.StatusCode) +} + func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) diff --git a/node/setup.go b/node/setup.go new file mode 100644 index 000000000..dc5498ba1 --- /dev/null +++ b/node/setup.go @@ -0,0 +1,711 @@ +package 
node + +import ( + "bytes" + "context" + "errors" + "fmt" + "net" + "strings" + "time" + + dbm "github.com/tendermint/tm-db" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/blocksync" + cfg "github.com/tendermint/tendermint/config" + cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/evidence" + + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light" + mempl "github.com/tendermint/tendermint/mempool" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/pex" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" + blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" + "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/state/txindex/kv" + "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/statesync" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" + + _ "github.com/lib/pq" // provide the psql db driver +) + +// DBContext specifies config information for loading a new DB. +type DBContext struct { + ID string + Config *cfg.Config +} + +// DBProvider takes a DBContext and returns an instantiated DB. +type DBProvider func(*DBContext) (dbm.DB, error) + +const readHeaderTimeout = 10 * time.Second + +// DefaultDBProvider returns a database using the DBBackend and DBDir +// specified in the ctx.Config. 
+func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +} + +// GenesisDocProvider returns a GenesisDoc. +// It allows the GenesisDoc to be pulled from sources other than the +// filesystem, for instance from a distributed key-value store cluster. +type GenesisDocProvider func() (*types.GenesisDoc, error) + +// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads +// the GenesisDoc from the config.GenesisFile() on the filesystem. +func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { + return func() (*types.GenesisDoc, error) { + return types.GenesisDocFromFile(config.GenesisFile()) + } +} + +// Provider takes a config and a logger and returns a ready to go Node. +type Provider func(*cfg.Config, log.Logger) (*Node, error) + +// DefaultNewNode returns a Tendermint node with default settings for the +// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. +// It implements NodeProvider. +func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) + } + + return NewNode(config, + privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + nodeKey, + proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), + DefaultGenesisDocProviderFunc(config), + DefaultDBProvider, + DefaultMetricsProvider(config.Instrumentation), + logger, + ) +} + +// MetricsProvider returns a consensus, p2p and mempool Metrics. +type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) + +// DefaultMetricsProvider returns Metrics build using Prometheus client library +// if Prometheus is enabled. 
Otherwise, it returns no-op Metrics. +func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { + return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) { + if config.Prometheus { + return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), + p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), + mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), + sm.PrometheusMetrics(config.Namespace, "chain_id", chainID), + proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID), + blocksync.PrometheusMetrics(config.Namespace, "chain_id", chainID), + statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID) + } + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics(), blocksync.NopMetrics(), statesync.NopMetrics() + } +} + +type blockSyncReactor interface { + SwitchToBlockSync(sm.State) error +} + +//------------------------------------------------------------------------------ + +func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { + var blockStoreDB dbm.DB + blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) + if err != nil { + return + } + blockStore = store.NewBlockStore(blockStoreDB) + + stateDB, err = dbProvider(&DBContext{"state", config}) + if err != nil { + return + } + + return +} + +func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { + proxyApp := proxy.NewAppConns(clientCreator, metrics) + proxyApp.SetLogger(logger.With("module", "proxy")) + if err := proxyApp.Start(); err != nil { + return nil, fmt.Errorf("error starting proxy app connections: %v", err) + } + return proxyApp, nil +} + +func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { + eventBus := types.NewEventBus() + 
eventBus.SetLogger(logger.With("module", "events")) + if err := eventBus.Start(); err != nil { + return nil, err + } + return eventBus, nil +} + +func createAndStartIndexerService( + config *cfg.Config, + chainID string, + dbProvider DBProvider, + eventBus *types.EventBus, + logger log.Logger, +) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { + var ( + txIndexer txindex.TxIndexer + blockIndexer indexer.BlockIndexer + ) + + switch config.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&DBContext{"tx_index", config}) + if err != nil { + return nil, nil, nil, err + } + + txIndexer = kv.NewTxIndex(store) + blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) + + case "psql": + if config.TxIndex.PsqlConn == "" { + return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`) + } + es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID) + if err != nil { + return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err) + } + txIndexer = es.TxIndexer() + blockIndexer = es.BlockIndexer() + + default: + txIndexer = &null.TxIndex{} + blockIndexer = &blockidxnull.BlockerIndexer{} + } + + indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) + indexerService.SetLogger(logger.With("module", "txindex")) + + if err := indexerService.Start(); err != nil { + return nil, nil, nil, err + } + + return indexerService, txIndexer, blockIndexer, nil +} + +func doHandshake( + stateStore sm.Store, + state sm.State, + blockStore sm.BlockStore, + genDoc *types.GenesisDoc, + eventBus types.BlockEventPublisher, + proxyApp proxy.AppConns, + consensusLogger log.Logger, +) error { + handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) + handshaker.SetLogger(consensusLogger) + handshaker.SetEventBus(eventBus) + if err := handshaker.Handshake(proxyApp); err != nil { + return fmt.Errorf("error during handshake: %v", err) + } + return nil +} + +func 
logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { + // Log the version info. + logger.Info("Version info", + "tendermint_version", version.TMCoreSemVer, + "abci", version.ABCISemVer, + "block", version.BlockProtocol, + "p2p", version.P2PProtocol, + "commit_hash", version.TMGitCommitHash, + ) + + // If the state and software differ in block version, at least log it. + if state.Version.Consensus.Block != version.BlockProtocol { + logger.Info("Software and state have different block protocols", + "software", version.BlockProtocol, + "state", state.Version.Consensus.Block, + ) + } + + addr := pubKey.Address() + // Log whether this node is a validator or an observer + if state.Validators.HasAddress(addr) { + consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey) + } else { + consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) + } +} + +func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { + if state.Validators.Size() > 1 { + return false + } + addr, _ := state.Validators.GetByIndex(0) + return bytes.Equal(pubKey.Address(), addr) +} + +func createMempoolAndMempoolReactor( + config *cfg.Config, + proxyApp proxy.AppConns, + state sm.State, + memplMetrics *mempl.Metrics, + logger log.Logger, +) (mempl.Mempool, p2p.Reactor) { + switch config.Mempool.Version { + case cfg.MempoolV1: + mp := mempoolv1.NewTxMempool( + logger, + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv1.WithMetrics(memplMetrics), + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) + + reactor := mempoolv1.NewReactor( + config.Mempool, + mp, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + + return mp, reactor + + case cfg.MempoolV0: + mp := mempoolv0.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv0.WithMetrics(memplMetrics), + 
mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + ) + + mp.SetLogger(logger) + + reactor := mempoolv0.NewReactor( + config.Mempool, + mp, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + + return mp, reactor + + default: + return nil, nil + } +} + +func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, + stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger, +) (*evidence.Reactor, *evidence.Pool, error) { + evidenceDB, err := dbProvider(&DBContext{"evidence", config}) + if err != nil { + return nil, nil, err + } + evidenceLogger := logger.With("module", "evidence") + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + if err != nil { + return nil, nil, err + } + evidenceReactor := evidence.NewReactor(evidencePool) + evidenceReactor.SetLogger(evidenceLogger) + return evidenceReactor, evidencePool, nil +} + +func createBlocksyncReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore *store.BlockStore, + blockSync bool, + logger log.Logger, + metrics *blocksync.Metrics, +) (bcReactor p2p.Reactor, err error) { + switch config.BlockSync.Version { + case "v0": + bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics) + case "v1", "v2": + return nil, fmt.Errorf("block sync version %s has been deprecated. 
Please use v0", config.BlockSync.Version) + default: + return nil, fmt.Errorf("unknown fastsync version %s", config.BlockSync.Version) + } + + bcReactor.SetLogger(logger.With("module", "blocksync")) + return bcReactor, nil +} + +func createConsensusReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + mempool mempl.Mempool, + evidencePool *evidence.Pool, + privValidator types.PrivValidator, + csMetrics *cs.Metrics, + waitSync bool, + eventBus *types.EventBus, + consensusLogger log.Logger, +) (*cs.Reactor, *cs.State) { + consensusState := cs.NewState( + config.Consensus, + state.Copy(), + blockExec, + blockStore, + mempool, + evidencePool, + cs.StateMetrics(csMetrics), + ) + consensusState.SetLogger(consensusLogger) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } + consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) + consensusReactor.SetLogger(consensusLogger) + // services which will be publishing and/or subscribing for messages (events) + // consensusReactor will set it on consensusState and blockExecutor + consensusReactor.SetEventBus(eventBus) + return consensusReactor, consensusState +} + +func createTransport( + config *cfg.Config, + nodeInfo p2p.NodeInfo, + nodeKey *p2p.NodeKey, + proxyApp proxy.AppConns, +) ( + *p2p.MultiplexTransport, + []p2p.PeerFilterFunc, +) { + var ( + mConnConfig = p2p.MConnConfig(config.P2P) + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) + + if !config.P2P.AllowDuplicateIP { + connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) + } + + // Filter peers by addr or pubkey with an ABCI query. + // If the query return code is OK, add peer. + if config.FilterPeers { + connFilters = append( + connFilters, + // ABCI query for address filtering. 
+ func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { + res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + + peerFilters = append( + peerFilters, + // ABCI query for ID filtering. + func(_ p2p.IPeerSet, p p2p.Peer) error { + res, err := proxyApp.Query().Query(context.TODO(), &abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + } + + p2p.MultiplexTransportConnFilters(connFilters...)(transport) + + // Limit the number of incoming connections. + max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + p2p.MultiplexTransportMaxIncomingConnections(max)(transport) + + return transport, peerFilters +} + +func createSwitch(config *cfg.Config, + transport p2p.Transport, + p2pMetrics *p2p.Metrics, + peerFilters []p2p.PeerFilterFunc, + mempoolReactor p2p.Reactor, + bcReactor p2p.Reactor, + stateSyncReactor *statesync.Reactor, + consensusReactor *cs.Reactor, + evidenceReactor *evidence.Reactor, + nodeInfo p2p.NodeInfo, + nodeKey *p2p.NodeKey, + p2pLogger log.Logger, +) *p2p.Switch { + sw := p2p.NewSwitch( + config.P2P, + transport, + p2p.WithMetrics(p2pMetrics), + p2p.SwitchPeerFilters(peerFilters...), + ) + sw.SetLogger(p2pLogger) + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("BLOCKSYNC", bcReactor) + sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) + + sw.SetNodeInfo(nodeInfo) + sw.SetNodeKey(nodeKey) + + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) + return sw +} + +func 
createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, + p2pLogger log.Logger, nodeKey *p2p.NodeKey, +) (pex.AddrBook, error) { + addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) + addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + + // Add ourselves to addrbook to prevent dialing ourselves + if config.P2P.ExternalAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } + if config.P2P.ListenAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } + + sw.SetAddrBook(addrBook) + + return addrBook, nil +} + +func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, + sw *p2p.Switch, logger log.Logger, +) *pex.Reactor { + // TODO persistent peers ? so we can have their DNS addrs saved + pexReactor := pex.NewReactor(addrBook, + &pex.ReactorConfig{ + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), + SeedMode: config.P2P.SeedMode, + // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 + // blocks assuming 10s blocks ~ 28 hours. + // TODO (melekes): make it dynamic based on the actual block latencies + // from the live network. + // https://github.com/tendermint/tendermint/issues/3523 + SeedDisconnectWaitPeriod: 28 * time.Hour, + PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, + }) + pexReactor.SetLogger(logger.With("module", "pex")) + sw.AddReactor("PEX", pexReactor) + return pexReactor +} + +// startStateSync starts an asynchronous state sync process, then switches to block sync mode. 
+func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor, + stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, blockSync bool, + stateStore sm.Store, blockStore *store.BlockStore, state sm.State, +) error { + ssR.Logger.Info("Starting state sync") + + if stateProvider == nil { + var err error + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + stateProvider, err = statesync.NewLightClientStateProvider( + ctx, + state.ChainID, state.Version, state.InitialHeight, + config.RPCServers, light.TrustOptions{ + Period: config.TrustPeriod, + Height: config.TrustHeight, + Hash: config.TrustHashBytes(), + }, ssR.Logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + } + + go func() { + state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + if err != nil { + ssR.Logger.Error("State sync failed", "err", err) + return + } + err = stateStore.Bootstrap(state) + if err != nil { + ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) + return + } + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + ssR.Logger.Error("Failed to store last seen commit", "err", err) + return + } + + if blockSync { + err = bcR.SwitchToBlockSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to block sync", "err", err) + return + } + } else { + conR.SwitchToConsensus(state, true) + } + }() + return nil +} + +//------------------------------------------------------------------------------ + +var genesisDocKey = []byte("genesisDoc") + +// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the +// database, or creates one using the given genesisDocProvider. On success this also +// returns the genesis doc loaded through the given provider. 
+func LoadStateFromDBOrGenesisDocProvider( + stateDB dbm.DB, + genesisDocProvider GenesisDocProvider, +) (sm.State, *types.GenesisDoc, error) { + // Get genesis doc + genDoc, err := loadGenesisDoc(stateDB) + if err != nil { + genDoc, err = genesisDocProvider() + if err != nil { + return sm.State{}, nil, err + } + + err = genDoc.ValidateAndComplete() + if err != nil { + return sm.State{}, nil, fmt.Errorf("error in genesis doc: %w", err) + } + // save genesis doc to prevent a certain class of user errors (e.g. when it + // was changed, accidentally or not). Also good for audit trail. + if err := saveGenesisDoc(stateDB, genDoc); err != nil { + return sm.State{}, nil, err + } + } + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardFinalizeBlockResponses: false, + }) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) + if err != nil { + return sm.State{}, nil, err + } + return state, genDoc, nil +} + +// panics if failed to unmarshal bytes +func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { + b, err := db.Get(genesisDocKey) + if err != nil { + panic(err) + } + if len(b) == 0 { + return nil, errors.New("genesis doc not found") + } + var genDoc *types.GenesisDoc + err = tmjson.Unmarshal(b, &genDoc) + if err != nil { + panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) + } + return genDoc, nil +} + +// panics if failed to marshal the given genesis document +func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { + b, err := tmjson.Marshal(genDoc) + if err != nil { + return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) + } + if err := db.SetSync(genesisDocKey, b); err != nil { + return err + } + + return nil +} + +func createAndStartPrivValidatorSocketClient( + listenAddr, + chainID string, + logger log.Logger, +) (types.PrivValidator, error) { + pve, err := privval.NewSignerListener(listenAddr, logger) + if err != nil { + return nil, fmt.Errorf("failed to start 
private validator: %w", err) + } + + pvsc, err := privval.NewSignerClient(pve, chainID) + if err != nil { + return nil, fmt.Errorf("failed to start private validator: %w", err) + } + + // try to get a pubkey from private validate first time + _, err = pvsc.GetPubKey() + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + + const ( + retries = 50 // 50 * 100ms = 5s total + timeout = 100 * time.Millisecond + ) + pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) + + return pvscWithRetries, nil +} + +// splitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index 86b0d980a..2804c0bdf 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -38,13 +38,9 @@ type Reactor interface { // or other reason). RemovePeer(peer Peer, reason interface{}) - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. 
- Receive(chID byte, peer Peer, msgBytes []byte) + // Receive is called by the switch when an envelope is received from any connected + // peer on any of the channels registered by the reactor + Receive(Envelope) } //-------------------------------------- @@ -64,8 +60,8 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor { func (br *BaseReactor) SetSwitch(sw *Switch) { br.Switch = sw } -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } +func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } +func (*BaseReactor) AddPeer(peer Peer) {} +func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} +func (*BaseReactor) Receive(e Envelope) {} +func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index f52fe73f7..3fd09059c 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -724,6 +724,7 @@ type ChannelDescriptor struct { SendQueueCapacity int RecvBufferCapacity int RecvMessageCapacity int + MessageType proto.Message } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { diff --git a/p2p/metrics.gen.go b/p2p/metrics.gen.go index 98fb0121f..e452f1653 100644 --- a/p2p/metrics.gen.go +++ b/p2p/metrics.gen.go @@ -44,15 +44,29 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "num_txs", Help: "Number of transactions submitted by each peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + MessageReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "message_receive_bytes_total", + Help: "Number of bytes of each message type received.", + }, 
append(labels, "message_type")).With(labelsAndValues...), + MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "message_send_bytes_total", + Help: "Number of bytes of each message type sent.", + }, append(labels, "message_type")).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), - PeerReceiveBytesTotal: discard.NewCounter(), - PeerSendBytesTotal: discard.NewCounter(), - PeerPendingSendBytes: discard.NewGauge(), - NumTxs: discard.NewGauge(), + Peers: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), + NumTxs: discard.NewGauge(), + MessageReceiveBytesTotal: discard.NewCounter(), + MessageSendBytesTotal: discard.NewCounter(), } } diff --git a/p2p/metrics.go b/p2p/metrics.go index 7e21870c7..808142e9a 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -1,6 +1,11 @@ package p2p import ( + "fmt" + "reflect" + "regexp" + "sync" + "github.com/go-kit/kit/metrics" ) @@ -10,6 +15,13 @@ const ( MetricsSubsystem = "p2p" ) +var ( + // valueToLabelRegexp is used to find the golang package name and type name + // so that the name can be turned into a prometheus label where the characters + // in the label do not include prometheus special characters such as '*' and '.'. + valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) +) + //go:generate go run ../scripts/metricsgen -struct=Metrics // Metrics contains metrics exposed by this package. @@ -24,4 +36,43 @@ type Metrics struct { PeerPendingSendBytes metrics.Gauge `metrics_labels:"peer_id"` // Number of transactions submitted by each peer. NumTxs metrics.Gauge `metrics_labels:"peer_id"` + // Number of bytes of each message type received. + MessageReceiveBytesTotal metrics.Counter `metrics_labels:"message_type"` + // Number of bytes of each message type sent. 
+ MessageSendBytesTotal metrics.Counter `metrics_labels:"message_type"` +} + +type metricsLabelCache struct { + mtx *sync.RWMutex + messageLabelNames map[reflect.Type]string +} + +// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang +// type that is passed in. +// This method uses a map on the Metrics struct so that each label name only needs +// to be produced once to prevent expensive string operations. +func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string { + t := reflect.TypeOf(i) + m.mtx.RLock() + + if s, ok := m.messageLabelNames[t]; ok { + m.mtx.RUnlock() + return s + } + m.mtx.RUnlock() + + s := t.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + l := fmt.Sprintf("%s_%s", ss[1], ss[2]) + m.mtx.Lock() + defer m.mtx.Unlock() + m.messageLabelNames[t] = l + return l +} + +func newMetricsLabelCache() *metricsLabelCache { + return &metricsLabelCache{ + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, + } } diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index 10254c343..47117270b 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -42,9 +42,9 @@ func NewPeer(ip net.IP) *Peer { return mp } -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } +func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error +func (mp *Peer) TrySend(e p2p.Envelope) bool { return true } +func (mp *Peer) Send(e p2p.Envelope) bool { return true } func (mp *Peer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{ DefaultNodeID: mp.addr.ID, diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index 0389a7d19..5e61c3e0b 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -19,7 +19,7 @@ func NewReactor() *Reactor { return r } -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } 
-func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} +func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } +func (r *Reactor) AddPeer(peer p2p.Peer) {} +func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} +func (r *Reactor) Receive(e p2p.Envelope) {} diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index a9151c7d8..0850ab588 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -234,13 +234,13 @@ func (_m *Peer) Reset() error { return r0 } -// Send provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) +// Send provides a mock function with given fields: _a0 +func (_m *Peer) Send(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -335,13 +335,13 @@ func (_m *Peer) String() string { return r0 } -// TrySend provides a mock function with given fields: _a0, _a1 -func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) +// TrySend provides a mock function with given fields: _a0 +func (_m *Peer) TrySend(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } diff --git a/p2p/peer.go b/p2p/peer.go index d8d61a7a0..9a61cc896 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -3,8 +3,11 @@ package p2p import ( "fmt" "net" + "reflect" "time" + "github.com/cosmos/gogoproto/proto" + "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ 
-34,8 +37,8 @@ type Peer interface { Status() tmconn.ConnectionStatus SocketAddr() *NetAddress // actual address of the socket - Send(byte, []byte) bool - TrySend(byte, []byte) bool + Send(Envelope) bool + TrySend(Envelope) bool Set(string, interface{}) Get(string) interface{} @@ -120,6 +123,7 @@ type peer struct { metrics *Metrics metricsTicker *time.Ticker + mlc *metricsLabelCache // When removal of a peer fails, we set this flag removalAttemptFailed bool @@ -132,8 +136,10 @@ func newPeer( mConfig tmconn.MConnConfig, nodeInfo NodeInfo, reactorsByCh map[byte]Reactor, + msgTypeByChID map[byte]proto.Message, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), + mlc *metricsLabelCache, options ...PeerOption, ) *peer { p := &peer{ @@ -143,12 +149,14 @@ func newPeer( Data: cmap.NewCMap(), metricsTicker: time.NewTicker(metricsTickerDuration), metrics: NopMetrics(), + mlc: mlc, } p.mconn = createMConnection( pc.conn, p, reactorsByCh, + msgTypeByChID, chDescs, onPeerError, mConfig, @@ -249,40 +257,39 @@ func (p *peer) Status() tmconn.ConnectionStatus { // Send msg bytes to the channel identified by chID byte. Returns false if the // send queue is full after timeout, specified by MConnection. -func (p *peer) Send(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - // see Switch#Broadcast, where we fetch the list of peers and loop over - // them - while we're looping, one peer may be removed and stopped. - return false - } else if !p.hasChannel(chID) { - return false - } - res := p.mconn.Send(chID, msgBytes) - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res +func (p *peer) Send(e Envelope) bool { + return p.send(e.ChannelID, e.Message, p.mconn.Send) } // TrySend msg bytes to the channel identified by chID byte. Immediately returns // false if the send queue is full. 
-func (p *peer) TrySend(chID byte, msgBytes []byte) bool { +func (p *peer) TrySend(e Envelope) bool { + return p.send(e.ChannelID, e.Message, p.mconn.TrySend) +} + +func (p *peer) send(chID byte, msg proto.Message, sendFunc func(byte, []byte) bool) bool { if !p.IsRunning() { return false } else if !p.hasChannel(chID) { return false } - res := p.mconn.TrySend(chID, msgBytes) + metricLabelValue := p.mlc.ValueToMetricLabel(msg) + if w, ok := msg.(Wrapper); ok { + msg = w.Wrap() + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + p.Logger.Error("marshaling message to send", "error", err) + return false + } + res := sendFunc(chID, msgBytes) if res { labels := []string{ "peer_id", string(p.ID()), "chID", fmt.Sprintf("%#x", chID), } p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) + p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) } return res } @@ -384,6 +391,7 @@ func createMConnection( conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, + msgTypeByChID map[byte]proto.Message, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), config tmconn.MConnConfig, @@ -396,12 +404,29 @@ func createMConnection( // which does onPeerError. 
panic(fmt.Sprintf("Unknown channel %X", chID)) } + mt := msgTypeByChID[chID] + msg := proto.Clone(mt) + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(fmt.Errorf("unmarshaling message: %s into type: %s", err, reflect.TypeOf(mt))) + } labels := []string{ "peer_id", string(p.ID()), "chID", fmt.Sprintf("%#x", chID), } + if w, ok := msg.(Unwrapper); ok { + msg, err = w.Unwrap() + if err != nil { + panic(fmt.Errorf("unwrapping message: %s", err)) + } + } p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) - reactor.Receive(chID, p, msgBytes) + p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) + reactor.Receive(Envelope{ + ChannelID: chID, + Src: p, + Message: msg, + }) } onError := func(r interface{}) { diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index db3d9261e..40a345424 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -18,22 +18,22 @@ type mockPeer struct { id ID } -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() ID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } -func (mp *mockPeer) SetRemovalFailed() {} -func (mp *mockPeer) 
GetRemovalFailed() bool { return false } +func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error +func (mp *mockPeer) TrySend(e Envelope) bool { return true } +func (mp *mockPeer) Send(e Envelope) bool { return true } +func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } +func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } +func (mp *mockPeer) ID() ID { return mp.id } +func (mp *mockPeer) IsOutbound() bool { return false } +func (mp *mockPeer) IsPersistent() bool { return true } +func (mp *mockPeer) Get(s string) interface{} { return s } +func (mp *mockPeer) Set(string, interface{}) {} +func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } +func (mp *mockPeer) SocketAddr() *NetAddress { return nil } +func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (mp *mockPeer) CloseConn() error { return nil } +func (mp *mockPeer) SetRemovalFailed() {} +func (mp *mockPeer) GetRemovalFailed() bool { return false } // Returns a mock peer func newMockPeer(ip net.IP) *mockPeer { diff --git a/p2p/peer_test.go b/p2p/peer_test.go index f8808f14d..ddfeb4234 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,6 +15,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" @@ -70,7 +72,7 @@ func TestPeerSend(t *testing.T) { }) assert.True(p.CanSend(testCh)) - assert.True(p.Send(testCh, []byte("Asylum"))) + assert.True(p.Send(Envelope{ChannelID: testCh, Message: &p2p.Message{}})) } func createOutboundPeerAndPerformHandshake( @@ -82,6 +84,9 @@ func 
createOutboundPeerAndPerformHandshake( {ID: testCh, Priority: 1}, } reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} + msgTypeByChID := map[byte]proto.Message{ + testCh: &p2p.Message{}, + } pk := ed25519.GenPrivKey() pc, err := testOutboundPeerConn(addr, config, false, pk) if err != nil { @@ -94,7 +99,7 @@ func createOutboundPeerAndPerformHandshake( return nil, err } - p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) + p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, msgTypeByChID, chDescs, func(p Peer, r interface{}) {}, newMetricsLabelCache()) p.SetLogger(log.TestingLogger().With("peer", addr)) return p, nil } diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 006f89cd7..3296648d0 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -6,8 +6,6 @@ import ( "sync" "time" - "github.com/cosmos/gogoproto/proto" - "github.com/tendermint/tendermint/libs/cmap" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -184,6 +182,7 @@ func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, + MessageType: &tmp2p.Message{}, }, } } @@ -236,16 +235,10 @@ func (r *Reactor) logErrAddrBook(err error) { } // Receive implements Reactor by handling incoming PEX messages. 
-func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) +func (r *Reactor) Receive(e p2p.Envelope) { + r.Logger.Debug("Received message", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *tmp2p.PexRequest: // NOTE: this is a prime candidate for amplification attacks, @@ -255,8 +248,8 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { // If we're a seed and this is an inbound peer, // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) + if r.config.SeedMode && !e.Src.IsOutbound() { + id := string(e.Src.ID()) v := r.lastReceivedRequests.Get(id) if v != nil { // FlushStop/StopPeer are already @@ -266,36 +259,36 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { r.lastReceivedRequests.Set(id, time.Now()) // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) + r.SendAddrs(e.Src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) go func() { // In a go-routine so it doesn't block .Receive. - src.FlushStop() - r.Switch.StopPeerGracefully(src) + e.Src.FlushStop() + r.Switch.StopPeerGracefully(e.Src) }() } else { // Check we're not receiving requests too frequently. 
- if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + if err := r.receiveRequest(e.Src); err != nil { + r.Switch.StopPeerForError(e.Src, err) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) return } - r.SendAddrs(src, r.book.GetSelection()) + r.SendAddrs(e.Src, r.book.GetSelection()) } case *tmp2p.PexAddrs: // If we asked for addresses, add them to the book addrs, err := p2p.NetAddressesFromProto(msg.Addrs) if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + r.Switch.StopPeerForError(e.Src, err) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) return } - err = r.ReceiveAddrs(addrs, src) + err = r.ReceiveAddrs(addrs, e.Src) if err != nil { - r.Switch.StopPeerForError(src, err) + r.Switch.StopPeerForError(e.Src, err) if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) } return } @@ -348,7 +341,10 @@ func (r *Reactor) RequestAddrs(p Peer) { } r.Logger.Debug("Request addrs", "from", p) r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) + p.Send(p2p.Envelope{ + ChannelID: PexChannel, + Message: &tmp2p.PexRequest{}, + }) } // ReceiveAddrs adds the given addrs to the addrbook if theres an open @@ -406,7 +402,11 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // SendAddrs sends addrs to the peer. func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)})) + e := p2p.Envelope{ + ChannelID: PexChannel, + Message: &tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)}, + } + p.Send(e) } // SetEnsurePeersPeriod sets period to ensure peers connected. 
@@ -763,43 +763,3 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { book.MarkAttempt(addr) } } - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.Message{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.Message_PexRequest{PexRequest: pb} - case *tmp2p.PexAddrs: - msg.Sum = &tmp2p.Message_PexAddrs{PexAddrs: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.Message{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.Message_PexRequest: - return msg.PexRequest, nil - case *tmp2p.Message_PexAddrs: - return msg.PexAddrs, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index d5e052e91..70e5e8c02 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -131,12 +131,11 @@ func TestPEXReactorReceive(t *testing.T) { r.RequestAddrs(peer) size := book.Size() - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) - r.Receive(PexChannel, peer, msg) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.Equal(t, size+1, book.Size()) - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
+ r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) } func TestPEXReactorRequestMessageAbuse(t *testing.T) { @@ -155,20 +154,19 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { require.True(t, book.HasAddress(peerAddr)) id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) // first time creates the entry - r.Receive(PexChannel, peer, msg) + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.True(t, r.lastReceivedRequests.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // next time sets the last time value - r.Receive(PexChannel, peer, msg) + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.True(t, r.lastReceivedRequests.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.False(t, r.lastReceivedRequests.Has(id)) assert.False(t, sw.Peers().Has(peer.ID())) assert.True(t, book.IsBanned(peerAddr)) @@ -192,15 +190,15 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} // receive some addrs. 
should clear the request - r.Receive(PexChannel, peer, msg) + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.False(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) + r.Receive(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.False(t, sw.Peers().Has(peer.ID())) assert.True(t, book.IsBanned(peer.SocketAddr())) } @@ -486,8 +484,12 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { pexR.RequestAddrs(peer) size := book.Size() - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) - pexR.Receive(PexChannel, peer, msg) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} + pexR.Receive(p2p.Envelope{ + ChannelID: PexChannel, + Src: peer, + Message: msg, + }) assert.Equal(t, size, book.Size()) pexR.AddPeer(peer) @@ -695,7 +697,9 @@ func TestPexVectors(t *testing.T) { for _, tc := range testCases { tc := tc - bz := mustEncode(tc.msg) + w := tc.msg.(p2p.Wrapper).Wrap() + bz, err := proto.Marshal(w) + require.NoError(t, err) require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } diff --git a/p2p/switch.go b/p2p/switch.go index 884fd883e..adf1a396c 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -6,9 +6,9 @@ import ( "sync" "time" + "github.com/cosmos/gogoproto/proto" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p/conn" @@ -69,16 +69,17 @@ type PeerFilterFunc func(IPeerSet, Peer) error type Switch struct { service.BaseService - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - 
reconnecting *cmap.CMap - nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey - addrBook AddrBook + config *config.P2PConfig + reactors map[string]Reactor + chDescs []*conn.ChannelDescriptor + reactorsByCh map[byte]Reactor + msgTypeByChID map[byte]proto.Message + peers *PeerSet + dialing *cmap.CMap + reconnecting *cmap.CMap + nodeInfo NodeInfo // our node info + nodeKey *NodeKey // our node privkey + addrBook AddrBook // peers addresses with whom we'll maintain constant connection persistentPeersAddrs []*NetAddress unconditionalPeerIDs map[ID]struct{} @@ -91,6 +92,7 @@ type Switch struct { rng *rand.Rand // seed for randomizing dial times and orders metrics *Metrics + mlc *metricsLabelCache } // NetAddress returns the address the switch is listening on. @@ -108,11 +110,13 @@ func NewSwitch( transport Transport, options ...SwitchOption, ) *Switch { + sw := &Switch{ config: cfg, reactors: make(map[string]Reactor), chDescs: make([]*conn.ChannelDescriptor, 0), reactorsByCh: make(map[byte]Reactor), + msgTypeByChID: make(map[byte]proto.Message), peers: NewPeerSet(), dialing: cmap.NewCMap(), reconnecting: cmap.NewCMap(), @@ -121,6 +125,7 @@ func NewSwitch( filterTimeout: defaultFilterTimeout, persistentPeersAddrs: make([]*NetAddress, 0), unconditionalPeerIDs: make(map[ID]struct{}), + mlc: newMetricsLabelCache(), } // Ensure we have a completely undeterministic PRNG. @@ -164,6 +169,7 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { } sw.chDescs = append(sw.chDescs, chDesc) sw.reactorsByCh[chID] = reactor + sw.msgTypeByChID[chID] = chDesc.MessageType } sw.reactors[name] = reactor reactor.SetSwitch(sw) @@ -182,6 +188,7 @@ func (sw *Switch) RemoveReactor(name string, reactor Reactor) { } } delete(sw.reactorsByCh, chDesc.ID) + delete(sw.msgTypeByChID, chDesc.ID) } delete(sw.reactors, name) reactor.SetSwitch(nil) @@ -261,8 +268,8 @@ func (sw *Switch) OnStop() { // closed once msg bytes are sent to all peers (or time out). 
// // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", log.NewLazySprintf("%X", msgBytes)) +func (sw *Switch) Broadcast(e Envelope) chan bool { + sw.Logger.Debug("Broadcast", "channel", e.ChannelID) peers := sw.peers.List() var wg sync.WaitGroup @@ -272,7 +279,7 @@ func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { for _, peer := range peers { go func(p Peer) { defer wg.Done() - success := p.Send(chID, msgBytes) + success := p.Send(e) successChan <- success }(peer) } @@ -623,11 +630,13 @@ func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { func (sw *Switch) acceptRoutine() { for { p, err := sw.transport.Accept(peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - reactorsByCh: sw.reactorsByCh, - metrics: sw.metrics, - isPersistent: sw.IsPeerPersistent, + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + mlc: sw.mlc, + isPersistent: sw.IsPeerPersistent, }) if err != nil { switch err := err.(type) { @@ -726,11 +735,13 @@ func (sw *Switch) addOutboundPeerWithConfig( } p, err := sw.transport.Dial(*addr, peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - metrics: sw.metrics, + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + mlc: sw.mlc, }) if err != nil { if e, ok := err.(ErrRejected); ok { diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 9d5466df7..4a75033f1 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/cosmos/gogoproto/proto" 
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +24,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p/conn" + p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" ) var ( @@ -36,9 +38,8 @@ func init() { } type PeerMessage struct { - PeerID ID - Bytes []byte - Counter int + Contents proto.Message + Counter int } type TestReactor struct { @@ -70,12 +71,12 @@ func (tr *TestReactor) AddPeer(peer Peer) {} func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { +func (tr *TestReactor) Receive(e Envelope) { if tr.logMessages { tr.mtx.Lock() defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) + fmt.Printf("Received: %X, %X\n", e.ChannelID, e.Message) + tr.msgsReceived[e.ChannelID] = append(tr.msgsReceived[e.ChannelID], PeerMessage{Contents: e.Message, Counter: tr.msgsCounter}) tr.msgsCounter++ } } @@ -103,12 +104,12 @@ func initSwitchFunc(i int, sw *Switch) *Switch { // Make two reactors of two channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, + {ID: byte(0x00), Priority: 10, MessageType: &p2pproto.Message{}}, + {ID: byte(0x01), Priority: 10, MessageType: &p2pproto.Message{}}, }, true)) sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, + {ID: byte(0x02), Priority: 10, MessageType: &p2pproto.Message{}}, + {ID: byte(0x03), Priority: 10, MessageType: &p2pproto.Message{}}, }, true)) return sw @@ -135,31 +136,47 @@ func TestSwitches(t *testing.T) { } // Lets send some messages - ch0Msg := 
[]byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - + ch0Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } + ch1Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } + ch2Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "2", + }, + }, + } + s1.Broadcast(Envelope{ChannelID: byte(0x00), Message: ch0Msg}) + s1.Broadcast(Envelope{ChannelID: byte(0x01), Message: ch1Msg}) + s1.Broadcast(Envelope{ChannelID: byte(0x02), Message: ch2Msg}) assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second) assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second) assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("bar").(*TestReactor), 200*time.Millisecond, 5*time.Second) } func assertMsgReceivedWithTimeout( t *testing.T, - msgBytes []byte, + msg proto.Message, channel byte, reactor *TestReactor, checkPeriod, @@ -170,9 +187,13 @@ func assertMsgReceivedWithTimeout( select { case <-ticker.C: msgs := reactor.getMsgs(channel) + expectedBytes, err := proto.Marshal(msgs[0].Contents) + require.NoError(t, err) + gotBytes, err := proto.Marshal(msg) + require.NoError(t, err) if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) + if !bytes.Equal(expectedBytes, gotBytes) { + t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msg, msgs[0].Counter) } return } @@ -429,7 +450,10 @@ func TestSwitchStopPeerForError(t *testing.T) { // send messages to the peer from sw1 p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) + p.Send(Envelope{ + ChannelID: 0x1, + Message: &p2pproto.Message{}, + }) // stop sw2. this should cause the p to fail, // which results in calling StopPeerForError internally @@ -824,7 +848,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) { // Send random message from foo channel to another for i := 0; i < b.N; i++ { chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) + successChan := s1.Broadcast(Envelope{ChannelID: chID}) for s := range successChan { if s { numSuccess++ diff --git a/p2p/test_util.go b/p2p/test_util.go index 4e56f0193..1d9a4883c 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -149,8 +149,10 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error { MConnConfig(sw.config), ni, sw.reactorsByCh, + sw.msgTypeByChID, sw.chDescs, sw.StopPeerForError, + sw.mlc, ) if err = sw.addPeer(p); err != nil { diff --git a/p2p/transport.go b/p2p/transport.go index e6e19a901..b5538ff18 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -8,6 +8,7 @@ import ( "golang.org/x/net/netutil" + "github.com/cosmos/gogoproto/proto" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" @@ -47,9 +48,11 @@ type peerConfig struct { // isPersistent allows you to set a function, which, given socket address // (for outbound peers) OR self-reported address (for inbound peers), tells // if the peer is persistent or not. - isPersistent func(*NetAddress) bool - reactorsByCh map[byte]Reactor - metrics *Metrics + isPersistent func(*NetAddress) bool + reactorsByCh map[byte]Reactor + msgTypeByChID map[byte]proto.Message + metrics *Metrics + mlc *metricsLabelCache } // Transport emits and connects to Peers. 
The implementation of Peer is left to @@ -519,8 +522,10 @@ func (mt *MultiplexTransport) wrapPeer( mt.mConfig, ni, cfg.reactorsByCh, + cfg.msgTypeByChID, cfg.chDescs, cfg.onPeerError, + cfg.mlc, PeerMetrics(cfg.metrics), ) diff --git a/p2p/trust/config.go b/p2p/trust/config.go deleted file mode 100644 index 0f990a991..000000000 --- a/p2p/trust/config.go +++ /dev/null @@ -1,55 +0,0 @@ -package trust - -import "time" - -// MetricConfig - Configures the weight functions and time intervals for the metric -type MetricConfig struct { - // Determines the percentage given to current behavior - ProportionalWeight float64 - - // Determines the percentage given to prior behavior - IntegralWeight float64 - - // The window of time that the trust metric will track events across. - // This can be set to cover many days without issue - TrackingWindow time.Duration - - // Each interval should be short for adapability. - // Less than 30 seconds is too sensitive, - // and greater than 5 minutes will make the metric numb - IntervalLength time.Duration -} - -// DefaultConfig returns a config with values that have been tested and produce desirable results -func DefaultConfig() MetricConfig { - return MetricConfig{ - ProportionalWeight: 0.4, - IntegralWeight: 0.6, - TrackingWindow: (time.Minute * 60 * 24) * 14, // 14 days. 
- IntervalLength: 1 * time.Minute, - } -} - -// Ensures that all configuration elements have valid values -func customConfig(tmc MetricConfig) MetricConfig { - config := DefaultConfig() - - // Check the config for set values, and setup appropriately - if tmc.ProportionalWeight > 0 { - config.ProportionalWeight = tmc.ProportionalWeight - } - - if tmc.IntegralWeight > 0 { - config.IntegralWeight = tmc.IntegralWeight - } - - if tmc.IntervalLength > time.Duration(0) { - config.IntervalLength = tmc.IntervalLength - } - - if tmc.TrackingWindow > time.Duration(0) && - tmc.TrackingWindow >= config.IntervalLength { - config.TrackingWindow = tmc.TrackingWindow - } - return config -} diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go deleted file mode 100644 index dd2d75d43..000000000 --- a/p2p/trust/metric.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "math" - "time" - - "github.com/tendermint/tendermint/libs/service" - tmsync "github.com/tendermint/tendermint/libs/sync" -) - -//--------------------------------------------------------------------------------------- - -const ( - // The weight applied to the derivative when current behavior is >= previous behavior - defaultDerivativeGamma1 = 0 - - // The weight applied to the derivative when current behavior is less than previous behavior - defaultDerivativeGamma2 = 1.0 - - // The weight applied to history data values when calculating the history value - defaultHistoryDataWeight = 0.8 -) - -// MetricHistoryJSON - history data necessary to save the trust metric -type MetricHistoryJSON struct { - NumIntervals int `json:"intervals"` - History []float64 `json:"history"` -} - -// Metric - keeps track of peer reliability -// See tendermint/docs/architecture/adr-006-trust-metric.md for details -type Metric struct { - service.BaseService - - // Mutex that 
protects the metric from concurrent access - mtx tmsync.Mutex - - // Determines the percentage given to current behavior - proportionalWeight float64 - - // Determines the percentage given to prior behavior - integralWeight float64 - - // Count of how many time intervals this metric has been tracking - numIntervals int - - // Size of the time interval window for this trust metric - maxIntervals int - - // The time duration for a single time interval - intervalLen time.Duration - - // Stores the trust history data for this metric - history []float64 - - // Weights applied to the history data when calculating the history value - historyWeights []float64 - - // The sum of the history weights used when calculating the history value - historyWeightSum float64 - - // The current number of history data elements - historySize int - - // The maximum number of history data elements - historyMaxSize int - - // The calculated history value for the current time interval - historyValue float64 - - // The number of recorded good and bad events for the current time interval - bad, good float64 - - // While true, history data is not modified - paused bool - - // Used during testing in order to control the passing of time intervals - testTicker MetricTicker -} - -// NewMetric returns a trust metric with the default configuration. -// Use Start to begin tracking the quality of peer behavior over time -func NewMetric() *Metric { - return NewMetricWithConfig(DefaultConfig()) -} - -// NewMetricWithConfig returns a trust metric with a custom configuration. 
-// Use Start to begin tracking the quality of peer behavior over time -func NewMetricWithConfig(tmc MetricConfig) *Metric { - tm := new(Metric) - config := customConfig(tmc) - - // Setup using the configuration values - tm.proportionalWeight = config.ProportionalWeight - tm.integralWeight = config.IntegralWeight - tm.intervalLen = config.IntervalLength - // The maximum number of time intervals is the tracking window / interval length - tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) - // The history size will be determined by the maximum number of time intervals - tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 - // This metric has a perfect history so far - tm.historyValue = 1.0 - - tm.BaseService = *service.NewBaseService(nil, "Metric", tm) - return tm -} - -// OnStart implements Service -func (tm *Metric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { - return err - } - go tm.processRequests() - return nil -} - -// OnStop implements Service -// Nothing to do since the goroutine shuts down by itself via BaseService.Quit() -func (tm *Metric) OnStop() {} - -// Returns a snapshot of the trust metric history data -func (tm *Metric) HistoryJSON() MetricHistoryJSON { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return MetricHistoryJSON{ - NumIntervals: tm.numIntervals, - History: tm.history, - } -} - -// Instantiates a trust metric by loading the history data for a single peer. 
-// This is called only once and only right after creation, which is why the -// lock is not held while accessing the trust metric struct members -func (tm *Metric) Init(hist MetricHistoryJSON) { - // Restore the number of time intervals we have previously tracked - if hist.NumIntervals > tm.maxIntervals { - hist.NumIntervals = tm.maxIntervals - } - tm.numIntervals = hist.NumIntervals - // Restore the history and its current size - if len(hist.History) > tm.historyMaxSize { - // Keep the history no larger than historyMaxSize - last := len(hist.History) - tm.historyMaxSize - hist.History = hist.History[last:] - } - tm.history = hist.History - tm.historySize = len(tm.history) - // Create the history weight values and weight sum - for i := 1; i <= tm.numIntervals; i++ { - x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight - tm.historyWeights = append(tm.historyWeights, x) - } - - for _, v := range tm.historyWeights { - tm.historyWeightSum += v - } - // Calculate the history value based on the loaded history data - tm.historyValue = tm.calcHistoryValue() -} - -// Pause tells the metric to pause recording data over time intervals. 
-// All method calls that indicate events will unpause the metric -func (tm *Metric) Pause() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - // Pause the metric for now - tm.paused = true -} - -// BadEvents indicates that an undesirable event(s) took place -func (tm *Metric) BadEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.bad += float64(num) -} - -// GoodEvents indicates that a desirable event(s) took place -func (tm *Metric) GoodEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.good += float64(num) -} - -// TrustValue gets the dependable trust value; always between 0 and 1 -func (tm *Metric) TrustValue() float64 { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return tm.calcTrustValue() -} - -// TrustScore gets a score based on the trust value always between 0 and 100 -func (tm *Metric) TrustScore() int { - score := tm.TrustValue() * 100 - - return int(math.Floor(score)) -} - -// NextTimeInterval saves current time interval data and prepares for the following interval -func (tm *Metric) NextTimeInterval() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - if tm.paused { - // Do not prepare for the next time interval while paused - return - } - - // Add the current trust value to the history data - newHist := tm.calcTrustValue() - tm.history = append(tm.history, newHist) - - // Update history and interval counters - if tm.historySize < tm.historyMaxSize { - tm.historySize++ - } else { - // Keep the history no larger than historyMaxSize - last := len(tm.history) - tm.historyMaxSize - tm.history = tm.history[last:] - } - - if tm.numIntervals < tm.maxIntervals { - tm.numIntervals++ - // Add the optimistic weight for the new time interval - wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) - tm.historyWeights = append(tm.historyWeights, wk) - tm.historyWeightSum += wk - } - - // Update the history data using Faded Memories - tm.updateFadedMemory() - // Calculate the history value for the upcoming 
time interval - tm.historyValue = tm.calcHistoryValue() - tm.good = 0 - tm.bad = 0 -} - -// SetTicker allows a TestTicker to be provided that will manually control -// the passing of time from the perspective of the Metric. -// The ticker must be set before Start is called on the metric -func (tm *Metric) SetTicker(ticker MetricTicker) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.testTicker = ticker -} - -// Copy returns a new trust metric with members containing the same values -func (tm *Metric) Copy() *Metric { - if tm == nil { - return nil - } - - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return &Metric{ - proportionalWeight: tm.proportionalWeight, - integralWeight: tm.integralWeight, - numIntervals: tm.numIntervals, - maxIntervals: tm.maxIntervals, - intervalLen: tm.intervalLen, - history: tm.history, - historyWeights: tm.historyWeights, - historyWeightSum: tm.historyWeightSum, - historySize: tm.historySize, - historyMaxSize: tm.historyMaxSize, - historyValue: tm.historyValue, - good: tm.good, - bad: tm.bad, - paused: tm.paused, - } - -} - -/* Private methods */ - -// This method is for a goroutine that handles all requests on the metric -func (tm *Metric) processRequests() { - t := tm.testTicker - if t == nil { - // No test ticker was provided, so we create a normal ticker - t = NewTicker(tm.intervalLen) - } - defer t.Stop() - // Obtain the raw channel - tick := t.GetChannel() -loop: - for { - select { - case <-tick: - tm.NextTimeInterval() - case <-tm.Quit(): - // Stop all further tracking for this metric - break loop - } - } -} - -// Wakes the trust metric up if it is currently paused -// This method needs to be called with the mutex locked -func (tm *Metric) unpause() { - // Check if this is the first experience with - // what we are tracking since being paused - if tm.paused { - tm.good = 0 - tm.bad = 0 - // New events cause us to unpause the metric - tm.paused = false - } -} - -// Calculates the trust value for the request processing -func (tm 
*Metric) calcTrustValue() float64 { - weightedP := tm.proportionalWeight * tm.proportionalValue() - weightedI := tm.integralWeight * tm.historyValue - weightedD := tm.weightedDerivative() - - tv := weightedP + weightedI + weightedD - // Do not return a negative value. - if tv < 0 { - tv = 0 - } - return tv -} - -// Calculates the current score for good/bad experiences -func (tm *Metric) proportionalValue() float64 { - value := 1.0 - - total := tm.good + tm.bad - if total > 0 { - value = tm.good / total - } - return value -} - -// Strengthens the derivative component when the change is negative -func (tm *Metric) weightedDerivative() float64 { - var weight float64 = defaultDerivativeGamma1 - - d := tm.derivativeValue() - if d < 0 { - weight = defaultDerivativeGamma2 - } - return weight * d -} - -// Calculates the derivative component -func (tm *Metric) derivativeValue() float64 { - return tm.proportionalValue() - tm.historyValue -} - -// Calculates the integral (history) component of the trust value -func (tm *Metric) calcHistoryValue() float64 { - var hv float64 - - for i := 0; i < tm.numIntervals; i++ { - hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] - } - - return hv / tm.historyWeightSum -} - -// Retrieves the actual history data value that represents the requested time interval -func (tm *Metric) fadedMemoryValue(interval int) float64 { - first := tm.historySize - 1 - - if interval == 0 { - // Base case - return tm.history[first] - } - - offset := intervalToHistoryOffset(interval) - return tm.history[first-offset] -} - -// Performs the update for our Faded Memories process, which allows the -// trust metric tracking window to be large while maintaining a small -// number of history data values -func (tm *Metric) updateFadedMemory() { - if tm.historySize < 2 { - return - } - - end := tm.historySize - 1 - // Keep the most recent history element - for count := 1; count < tm.historySize; count++ { - i := end - count - // The older the data is, the more we 
spread it out - x := math.Pow(2, float64(count)) - // Two history data values are merged into a single value - tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x - } -} - -// Map the interval value down to an offset from the beginning of history -func intervalToHistoryOffset(interval int) int { - // The system maintains 2^m interval values in the form of m history - // data values. Therefore, we access the ith interval by obtaining - // the history data index = the floor of log2(i) - return int(math.Floor(math.Log2(float64(interval)))) -} diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go deleted file mode 100644 index c3adfd5d1..000000000 --- a/p2p/trust/metric_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package trust - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTrustMetricScores(t *testing.T) { - tm := NewMetric() - err := tm.Start() - require.NoError(t, err) - - // Perfect score - tm.GoodEvents(1) - score := tm.TrustScore() - assert.Equal(t, 100, score) - - // Less than perfect score - tm.BadEvents(10) - score = tm.TrustScore() - assert.NotEqual(t, 100, score) - err = tm.Stop() - require.NoError(t, err) -} - -func TestTrustMetricConfig(t *testing.T) { - // 7 days - window := time.Minute * 60 * 24 * 7 - config := MetricConfig{ - TrackingWindow: window, - IntervalLength: 2 * time.Minute, - } - - tm := NewMetricWithConfig(config) - err := tm.Start() - require.NoError(t, err) - - // The max time intervals should be the TrackingWindow / IntervalLen - assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) - - dc := DefaultConfig() - // These weights should still be the default values - assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, dc.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - config.ProportionalWeight = 0.3 - config.IntegralWeight = 0.7 - tm = 
NewMetricWithConfig(config) - err = tm.Start() - require.NoError(t, err) - - // These weights should be equal to our custom values - assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, config.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() -} - -func TestTrustMetricCopyNilPointer(t *testing.T) { - var tm *Metric - - ctm := tm.Copy() - - assert.Nil(t, ctm) -} - -// XXX: This test fails non-deterministically -// -//nolint:unused,deadcode -func _TestTrustMetricStopPause(t *testing.T) { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt := NewTestTicker() - tm := NewMetric() - tm.SetTicker(tt) - err := tm.Start() - require.NoError(t, err) - // Allow some time intervals to pass and pause - tt.NextTick() - tt.NextTick() - tm.Pause() - - // could be 1 or 2 because Pause and NextTick race - first := tm.Copy().numIntervals - - // Allow more time to pass and check the intervals are unchanged - tt.NextTick() - tt.NextTick() - assert.Equal(t, first, tm.Copy().numIntervals) - - // Get the trust metric activated again - tm.GoodEvents(5) - // Allow some time intervals to pass and stop - tt.NextTick() - tt.NextTick() - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - second := tm.Copy().numIntervals - // Allow more intervals to pass while the metric is stopped - // and check that the number of intervals match - tm.NextTimeInterval() - tm.NextTimeInterval() - // XXX: fails non-deterministically: - // expected 5, got 6 - assert.Equal(t, second+2, tm.Copy().numIntervals) - - if first > second { - t.Fatalf("numIntervals should always increase or stay the same over time") - } -} diff --git a/p2p/trust/store.go b/p2p/trust/store.go deleted file mode 100644 index e7233c915..000000000 --- a/p2p/trust/store.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. 
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "encoding/json" - "fmt" - "time" - - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/libs/service" - tmsync "github.com/tendermint/tendermint/libs/sync" -) - -const defaultStorePeriodicSaveInterval = 1 * time.Minute - -var trustMetricKey = []byte("trustMetricStore") - -// MetricStore - Manages all trust metrics for peers -type MetricStore struct { - service.BaseService - - // Maps a Peer.Key to that peer's TrustMetric - peerMetrics map[string]*Metric - - // Mutex that protects the map and history data file - mtx tmsync.Mutex - - // The db where peer trust metric history data will be stored - db dbm.DB - - // This configuration will be used when creating new TrustMetrics - config MetricConfig -} - -// NewTrustMetricStore returns a store that saves data to the DB -// and uses the config when creating new trust metrics. -// Use Start to to initialize the trust metric store -func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore { - tms := &MetricStore{ - peerMetrics: make(map[string]*Metric), - db: db, - config: tmc, - } - - tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms) - return tms -} - -// OnStart implements Service -func (tms *MetricStore) OnStart() error { - if err := tms.BaseService.OnStart(); err != nil { - return err - } - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.loadFromDB() - go tms.saveRoutine() - return nil -} - -// OnStop implements Service -func (tms *MetricStore) OnStop() { - tms.BaseService.OnStop() - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // Stop all trust metric go-routines - for _, tm := range tms.peerMetrics { - if err := tm.Stop(); err != nil { - tms.Logger.Error("unable to stop metric store", "error", err) - } - } - - // Make the final trust history data save - tms.saveToDB() -} - -// Size returns the number of entries in the trust metric store 
-func (tms *MetricStore) Size() int { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - return tms.size() -} - -// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key. -// The caller is expected to call Start on the TrustMetric being added -func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - if key == "" || tm == nil { - return - } - tms.peerMetrics[key] = tm -} - -// GetPeerTrustMetric returns a trust metric by peer key -func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tm, ok := tms.peerMetrics[key] - if !ok { - // If the metric is not available, we will create it - tm = NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric store", "error", err) - } - // The metric needs to be in the map - tms.peerMetrics[key] = tm - } - return tm -} - -// PeerDisconnected pauses the trust metric associated with the peer identified by the key -func (tms *MetricStore) PeerDisconnected(key string) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // If the Peer that disconnected has a metric, pause it - if tm, ok := tms.peerMetrics[key]; ok { - tm.Pause() - } -} - -// Saves the history data for all peers to the store DB. 
-// This public method acquires the trust metric store lock -func (tms *MetricStore) SaveToDB() { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.saveToDB() -} - -/* Private methods */ - -// size returns the number of entries in the store without acquiring the mutex -func (tms *MetricStore) size() int { - return len(tms.peerMetrics) -} - -/* Loading & Saving */ -/* Both loadFromDB and savetoDB assume the mutex has been acquired */ - -// Loads the history data for all peers from the store DB -// cmn.Panics if file is corrupt -func (tms *MetricStore) loadFromDB() bool { - // Obtain the history data we have so far - bytes, err := tms.db.Get(trustMetricKey) - if err != nil { - panic(err) - } - if bytes == nil { - return false - } - - peers := make(map[string]MetricHistoryJSON) - err = json.Unmarshal(bytes, &peers) - if err != nil { - panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) - } - - // If history data exists in the file, - // load it into trust metric - for key, p := range peers { - tm := NewMetricWithConfig(tms.config) - - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric", "error", err) - } - tm.Init(p) - // Load the peer trust metric into the store - tms.peerMetrics[key] = tm - } - return true -} - -// Saves the history data for all peers to the store DB -func (tms *MetricStore) saveToDB() { - tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size()) - - peers := make(map[string]MetricHistoryJSON) - - for key, tm := range tms.peerMetrics { - // Add an entry for the peer identified by key - peers[key] = tm.HistoryJSON() - } - - // Write all the data back to the DB - bytes, err := json.Marshal(peers) - if err != nil { - tms.Logger.Error("Failed to encode the TrustHistory", "err", err) - return - } - if err := tms.db.SetSync(trustMetricKey, bytes); err != nil { - tms.Logger.Error("failed to flush data to disk", "error", err) - } -} - -// Periodically saves the trust history data to the DB -func 
(tms *MetricStore) saveRoutine() { - t := time.NewTicker(defaultStorePeriodicSaveInterval) - defer t.Stop() -loop: - for { - select { - case <-t.C: - tms.SaveToDB() - case <-tms.Quit(): - break loop - } - } -} diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go deleted file mode 100644 index c583d58aa..000000000 --- a/p2p/trust/store_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/libs/log" -) - -func TestTrustMetricStoreSaveLoad(t *testing.T) { - dir, err := os.MkdirTemp("", "trust_test") - require.NoError(t, err) - defer os.Remove(dir) - - historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) - require.NoError(t, err) - - // 0 peers saved - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - store.saveToDB() - // Load the data from the file - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - // Make sure we still have 0 entries - assert.Zero(t, store.Size()) - - // 100 TestTickers - var tt []*TestTicker - for i := 0; i < 100; i++ { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt = append(tt, NewTestTicker()) - } - // 100 peers - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - tm := NewMetric() - - tm.SetTicker(tt[i]) - err = tm.Start() - require.NoError(t, err) - store.AddPeerTrustMetric(key, tm) - - tm.BadEvents(10) - tm.GoodEvents(1) - } - // Check that we have 100 entries and save - assert.Equal(t, 100, store.Size()) - // Give the 100 metrics time to process the history data - for i := 0; 
i < 100; i++ { - tt[i].NextTick() - tt[i].NextTick() - } - // Stop all the trust metrics and save - err = store.Stop() - require.NoError(t, err) - - // Load the data from the DB - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Check that we still have 100 peers with imperfect trust values - assert.Equal(t, 100, store.Size()) - for _, tm := range store.peerMetrics { - assert.NotEqual(t, 1.0, tm.TrustValue()) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreConfig(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - config := MetricConfig{ - ProportionalWeight: 0.5, - IntegralWeight: 0.5, - } - - // Create a store with custom config - store := NewTrustMetricStore(historyDB, config) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Have the store make us a metric with the config - tm := store.GetPeerTrustMetric("TestKey") - - // Check that the options made it to the metric - assert.Equal(t, 0.5, tm.proportionalWeight) - assert.Equal(t, 0.5, tm.integralWeight) - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreLookup(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Create 100 peers in the trust metric store - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - store.GetPeerTrustMetric(key) - - // Check that the trust metric was successfully entered - ktm := store.peerMetrics[key] - assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStorePeerScore(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - 
require.NoError(t, err) - - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - key := "TestKey" - tm := store.GetPeerTrustMetric(key) - - // This peer is innocent so far - first := tm.TrustScore() - assert.Equal(t, 100, first) - - // Add some undesirable events and disconnect - tm.BadEvents(1) - first = tm.TrustScore() - assert.NotEqual(t, 100, first) - tm.BadEvents(10) - second := tm.TrustScore() - - if second > first { - t.Errorf("a greater number of bad events should lower the trust score") - } - store.PeerDisconnected(key) - - // We will remember our experiences with this peer - tm = store.GetPeerTrustMetric(key) - assert.NotEqual(t, 100, tm.TrustScore()) - err = store.Stop() - require.NoError(t, err) -} diff --git a/p2p/trust/ticker.go b/p2p/trust/ticker.go deleted file mode 100644 index 3f0f30919..000000000 --- a/p2p/trust/ticker.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
- -package trust - -import ( - "time" -) - -// MetricTicker provides a single ticker interface for the trust metric -type MetricTicker interface { - // GetChannel returns the receive only channel that fires at each time interval - GetChannel() <-chan time.Time - - // Stop will halt further activity on the ticker channel - Stop() -} - -// The ticker used during testing that provides manual control over time intervals -type TestTicker struct { - C chan time.Time - stopped bool -} - -// NewTestTicker returns our ticker used within test routines -func NewTestTicker() *TestTicker { - c := make(chan time.Time) - return &TestTicker{ - C: c, - } -} - -func (t *TestTicker) GetChannel() <-chan time.Time { - return t.C -} - -func (t *TestTicker) Stop() { - t.stopped = true -} - -// NextInterval manually sends Time on the ticker channel -func (t *TestTicker) NextTick() { - if t.stopped { - return - } - t.C <- time.Now() -} - -// Ticker is just a wrap around time.Ticker that allows it -// to meet the requirements of our interface -type Ticker struct { - *time.Ticker -} - -// NewTicker returns a normal time.Ticker wrapped to meet our interface -func NewTicker(d time.Duration) *Ticker { - return &Ticker{time.NewTicker(d)} -} - -func (t *Ticker) GetChannel() <-chan time.Time { - return t.C -} diff --git a/p2p/types.go b/p2p/types.go index b11765bb5..48e295a2b 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -1,8 +1,40 @@ package p2p import ( + "github.com/cosmos/gogoproto/proto" "github.com/tendermint/tendermint/p2p/conn" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) type ChannelDescriptor = conn.ChannelDescriptor type ConnectionStatus = conn.ConnectionStatus + +// Envelope contains a message with sender routing info. +type Envelope struct { + Src Peer // sender (empty if outbound) + Message proto.Message // message payload + ChannelID byte +} + +// Unwrapper is a Protobuf message that can contain a variety of inner messages +// (e.g. via oneof fields). 
If a Channel's message type implements Unwrapper, the +// p2p layer will automatically unwrap inbound messages so that reactors do not have to do this themselves. +type Unwrapper interface { + proto.Message + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} + +// Wrapper is a companion type to Unwrapper. It is a Protobuf message that can contain a variety of inner messages. The p2p layer will automatically wrap outbound messages so that the reactors do not have to do it themselves. +type Wrapper interface { + proto.Message + + // Wrap will take the underlying message and wrap it in its wrapper type. + Wrap() proto.Message +} + +var ( + _ Wrapper = &tmp2p.PexRequest{} + _ Wrapper = &tmp2p.PexAddrs{} +) diff --git a/proto/tendermint/blocksync/message.go b/proto/tendermint/blocksync/message.go new file mode 100644 index 000000000..a38ee6ca0 --- /dev/null +++ b/proto/tendermint/blocksync/message.go @@ -0,0 +1,73 @@ +package blocksync + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &StatusRequest{} +var _ p2p.Wrapper = &StatusResponse{} +var _ p2p.Wrapper = &NoBlockResponse{} +var _ p2p.Wrapper = &BlockResponse{} +var _ p2p.Wrapper = &BlockRequest{} + +const ( + BlockResponseMessagePrefixSize = 4 + BlockResponseMessageFieldKeySize = 1 +) + +func (m *BlockRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockRequest{BlockRequest: m} + return bm +} + +func (m *BlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockResponse{BlockResponse: m} + return bm +} + +func (m *NoBlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_NoBlockResponse{NoBlockResponse: m} + return bm +} + +func (m *StatusRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_StatusRequest{StatusRequest: m} + return bm +} + +func (m *StatusResponse) Wrap() proto.Message { + bm := 
&Message{} + bm.Sum = &Message_StatusResponse{StatusResponse: m} + return bm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped blockchain +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_BlockRequest: + return m.GetBlockRequest(), nil + + case *Message_BlockResponse: + return m.GetBlockResponse(), nil + + case *Message_NoBlockResponse: + return m.GetNoBlockResponse(), nil + + case *Message_StatusRequest: + return m.GetStatusRequest(), nil + + case *Message_StatusResponse: + return m.GetStatusResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/consensus/message.go b/proto/tendermint/consensus/message.go new file mode 100644 index 000000000..90f95a725 --- /dev/null +++ b/proto/tendermint/consensus/message.go @@ -0,0 +1,109 @@ +package consensus + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &VoteSetBits{} +var _ p2p.Wrapper = &VoteSetMaj23{} +var _ p2p.Wrapper = &Vote{} +var _ p2p.Wrapper = &ProposalPOL{} +var _ p2p.Wrapper = &Proposal{} +var _ p2p.Wrapper = &NewValidBlock{} +var _ p2p.Wrapper = &NewRoundStep{} +var _ p2p.Wrapper = &HasVote{} +var _ p2p.Wrapper = &BlockPart{} + +func (m *VoteSetBits) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetBits{VoteSetBits: m} + return cm + +} + +func (m *VoteSetMaj23) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetMaj23{VoteSetMaj23: m} + return cm +} + +func (m *HasVote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_HasVote{HasVote: m} + return cm +} + +func (m *Vote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Vote{Vote: m} + return cm +} + +func (m *BlockPart) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_BlockPart{BlockPart: m} + return cm +} + +func (m *ProposalPOL) Wrap() proto.Message { + cm := 
&Message{} + cm.Sum = &Message_ProposalPol{ProposalPol: m} + return cm +} + +func (m *Proposal) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Proposal{Proposal: m} + return cm +} + +func (m *NewValidBlock) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewValidBlock{NewValidBlock: m} + return cm +} + +func (m *NewRoundStep) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewRoundStep{NewRoundStep: m} + return cm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped consensus +// proto message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_NewRoundStep: + return m.GetNewRoundStep(), nil + + case *Message_NewValidBlock: + return m.GetNewValidBlock(), nil + + case *Message_Proposal: + return m.GetProposal(), nil + + case *Message_ProposalPol: + return m.GetProposalPol(), nil + + case *Message_BlockPart: + return m.GetBlockPart(), nil + + case *Message_Vote: + return m.GetVote(), nil + + case *Message_HasVote: + return m.GetHasVote(), nil + + case *Message_VoteSetMaj23: + return m.GetVoteSetMaj23(), nil + + case *Message_VoteSetBits: + return m.GetVoteSetBits(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go new file mode 100644 index 000000000..341b62a03 --- /dev/null +++ b/proto/tendermint/mempool/message.go @@ -0,0 +1,30 @@ +package mempool + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &Txs{} +var _ p2p.Unwrapper = &Message{} + +// Wrap implements the p2p Wrapper interface and wraps a mempool message. +func (m *Txs) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_Txs{Txs: m} + return mm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool +// message. 
+func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_Txs: + return m.GetTxs(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go new file mode 100644 index 000000000..6d369d4da --- /dev/null +++ b/proto/tendermint/p2p/pex.go @@ -0,0 +1,32 @@ +package p2p + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" +) + +func (m *PexAddrs) Wrap() proto.Message { + pm := &Message{} + pm.Sum = &Message_PexAddrs{PexAddrs: m} + return pm +} + +func (m *PexRequest) Wrap() proto.Message { + pm := &Message{} + pm.Sum = &Message_PexRequest{PexRequest: m} + return pm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped PEX +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_PexRequest: + return msg.PexRequest, nil + case *Message_PexAddrs: + return msg.PexAddrs, nil + default: + return nil, fmt.Errorf("unknown pex message: %T", msg) + } +} diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go new file mode 100644 index 000000000..f011b8ff6 --- /dev/null +++ b/proto/tendermint/statesync/message.go @@ -0,0 +1,58 @@ +package statesync + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &ChunkRequest{} +var _ p2p.Wrapper = &ChunkResponse{} +var _ p2p.Wrapper = &SnapshotsRequest{} +var _ p2p.Wrapper = &SnapshotsResponse{} + +func (m *SnapshotsResponse) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_SnapshotsResponse{SnapshotsResponse: m} + return sm +} + +func (m *SnapshotsRequest) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_SnapshotsRequest{SnapshotsRequest: m} + return sm +} + +func (m *ChunkResponse) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_ChunkResponse{ChunkResponse: m} + return sm +} + 
+func (m *ChunkRequest) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_ChunkRequest{ChunkRequest: m} + return sm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped state sync +// proto message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_ChunkRequest: + return m.GetChunkRequest(), nil + + case *Message_ChunkResponse: + return m.GetChunkResponse(), nil + + case *Message_SnapshotsRequest: + return m.GetSnapshotsRequest(), nil + + case *Message_SnapshotsResponse: + return m.GetSnapshotsResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 11edb847f..4f8346253 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -29,7 +29,7 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// BlockIdFlag indicates which BlcokID the signature is for +// BlockIdFlag indicates which BlockID the signature is for type BlockIDFlag int32 const ( diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 8d4f00972..bf3bc1600 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -9,15 +9,15 @@ import "tendermint/crypto/proof.proto"; import "tendermint/version/types.proto"; import "tendermint/types/validator.proto"; -// BlockIdFlag indicates which BlcokID the signature is for +// BlockIdFlag indicates which BlockID the signature is for enum BlockIDFlag { option (gogoproto.goproto_enum_stringer) = true; option (gogoproto.goproto_enum_prefix) = false; - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = 
"BlockIDFlagCommit"]; - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; + BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; // indicates an error condition + BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; // the vote was not received + BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; // voted for the block that received the majority + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; // voted for nil } // SignedMsgType is a type of signed message in the consensus. diff --git a/proxy/client.go b/proxy/client.go index c0a634cb8..0a77659cc 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -39,6 +39,26 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { return abcicli.NewLocalClient(l.mtx, l.app), nil } +//--------------------------------------------------------------- +// unsynchronized local proxy on an in-proc app (no mutex) + +type unsyncLocalClientCreator struct { + app types.Application +} + +// NewUnsyncLocalClientCreator returns a ClientCreator for the given app, which +// will be running locally. Unlike NewLocalClientCreator, this leaves +// synchronization up to the application. 
+func NewUnsyncLocalClientCreator(app types.Application) ClientCreator { + return &unsyncLocalClientCreator{ + app: app, + } +} + +func (l *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) { + return abcicli.NewUnsyncLocalClient(l.app), nil +} + //--------------------------------------------------------------- // remote proxy opens new connections to an external app process @@ -83,6 +103,12 @@ func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { panic(err) } return NewLocalClientCreator(app) + case "e2e_sync": + app, err := e2e.NewSyncApplication(e2e.DefaultConfig(dbDir)) + if err != nil { + panic(err) + } + return NewUnsyncLocalClientCreator(app) case "noop": return NewLocalClientCreator(types.NewBaseApplication()) default: diff --git a/rpc/core/routes.go b/rpc/core/routes.go index fe2d17e8b..cd658889f 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -17,23 +17,23 @@ var Routes = map[string]*rpc.RPCFunc{ "health": rpc.NewRPCFunc(Health, ""), "status": rpc.NewRPCFunc(Status, ""), "net_info": rpc.NewRPCFunc(NetInfo, ""), - "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"), - "genesis": rpc.NewRPCFunc(Genesis, ""), - "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk"), - "block": rpc.NewRPCFunc(Block, "height"), - "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash"), - "block_results": rpc.NewRPCFunc(BlockResults, "height"), - "commit": rpc.NewRPCFunc(Commit, "height"), - "header": rpc.NewRPCFunc(Header, "height"), - "header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash"), + "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), + "genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()), + "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()), + "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), + "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()), + "block_results": rpc.NewRPCFunc(BlockResults, "height", 
rpc.Cacheable("height")), + "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), + "header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")), + "header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash", rpc.Cacheable()), "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), - "tx": rpc.NewRPCFunc(Tx, "hash,prove"), + "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), "block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"), - "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), + "validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), - "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), + "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), @@ -44,7 +44,7 @@ var Routes = map[string]*rpc.RPCFunc{ // abci API "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), - "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), + "abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()), // evidence API "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"), diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 84956bae9..c322dfcea 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -7,8 +7,10 @@ import ( "encoding/json" "fmt" "net/http" + "net/url" "os" "os/exec" + "strings" "testing" "time" @@ -37,9 +39,7 @@ const ( testVal = "acbd" ) -var ( - ctx = context.Background() -) +var ctx = context.Background() type ResultEcho struct { Value string `json:"value"` @@ -57,6 +57,10 @@ type ResultEchoDataBytes struct { Value tmbytes.HexBytes `json:"value"` } +type 
ResultEchoWithDefault struct { + Value int `json:"value"` +} + // Define some routes var Routes = map[string]*server.RPCFunc{ "echo": server.NewRPCFunc(EchoResult, "arg"), @@ -64,6 +68,7 @@ var Routes = map[string]*server.RPCFunc{ "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"), "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"), "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), + "echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")), } func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { @@ -86,6 +91,14 @@ func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDat return &ResultEchoDataBytes{v}, nil } +func EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) { + val := -1 + if v != nil { + val = *v + } + return &ResultEchoWithDefault{val}, nil +} + func TestMain(m *testing.M) { setup() code := m.Run() @@ -199,26 +212,47 @@ func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.Hex return result.Value, nil } +func echoWithDefaultViaHTTP(cl client.Caller, v *int) (int, error) { + params := map[string]interface{}{} + if v != nil { + params["arg"] = *v + } + result := new(ResultEchoWithDefault) + if _, err := cl.Call(ctx, "echo_default", params, result); err != nil { + return 0, err + } + return result.Value, nil +} + func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { val := testVal got, err := echoViaHTTP(cl, val) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got, val) val2 := randBytes(t) got2, err := echoBytesViaHTTP(cl, val2) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got2, val2) val3 := tmbytes.HexBytes(randBytes(t)) got3, err := echoDataBytesViaHTTP(cl, val3) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got3, val3) val4 := tmrand.Intn(10000) got4, err := echoIntViaHTTP(cl, val4) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got4, val4) + + 
got5, err := echoWithDefaultViaHTTP(cl, nil) + require.NoError(t, err) + assert.Equal(t, got5, -1) + + val6 := tmrand.Intn(10000) + got6, err := echoWithDefaultViaHTTP(cl, &val6) + require.NoError(t, err) + assert.Equal(t, got6, val6) } func echoViaWS(cl *client.WSClient, val string) (string, error) { @@ -233,7 +267,6 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) { msg := <-cl.ResponsesCh if msg.Error != nil { return "", err - } result := new(ResultEcho) err = json.Unmarshal(msg.Result, result) @@ -255,7 +288,6 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { msg := <-cl.ResponsesCh if msg.Error != nil { return []byte{}, msg.Error - } result := new(ResultEchoBytes) err = json.Unmarshal(msg.Result, result) @@ -399,6 +431,74 @@ func TestWSClientPingPong(t *testing.T) { time.Sleep(6 * time.Second) } +func TestJSONRPCCaching(t *testing.T) { + httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1) + cl, err := client.DefaultHTTPClient(httpAddr) + require.NoError(t, err) + + // Not supplying the arg should result in not caching + params := make(map[string]interface{}) + req, err := types.MapToRequest(types.JSONRPCIntID(1000), "echo_default", params) + require.NoError(t, err) + + res1, err := rawJSONRPCRequest(t, cl, httpAddr, req) + defer func() { _ = res1.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "", res1.Header.Get("Cache-control")) + + // Supplying the arg should result in caching + params["arg"] = tmrand.Intn(10000) + req, err = types.MapToRequest(types.JSONRPCIntID(1001), "echo_default", params) + require.NoError(t, err) + + res2, err := rawJSONRPCRequest(t, cl, httpAddr, req) + defer func() { _ = res2.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control")) +} + +func rawJSONRPCRequest(t *testing.T, cl *http.Client, url string, req interface{}) (*http.Response, error) { + reqBytes, err := json.Marshal(req) + require.NoError(t, err) 
+ + reqBuf := bytes.NewBuffer(reqBytes) + httpReq, err := http.NewRequest(http.MethodPost, url, reqBuf) + require.NoError(t, err) + + httpReq.Header.Set("Content-type", "application/json") + + return cl.Do(httpReq) +} + +func TestURICaching(t *testing.T) { + httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1) + cl, err := client.DefaultHTTPClient(httpAddr) + require.NoError(t, err) + + // Not supplying the arg should result in not caching + args := url.Values{} + res1, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args) + defer func() { _ = res1.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "", res1.Header.Get("Cache-control")) + + // Supplying the arg should result in caching + args.Set("arg", fmt.Sprintf("%d", tmrand.Intn(10000))) + res2, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args) + defer func() { _ = res2.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control")) +} + +func rawURIRequest(t *testing.T, cl *http.Client, url string, args url.Values) (*http.Response, error) { + req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(args.Encode())) + require.NoError(t, err) + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return cl.Do(req) +} + func randBytes(t *testing.T) []byte { n := tmrand.Intn(10) + 2 buf := make([]byte, n) diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index c73694d6e..db162f17a 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -55,6 +55,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han requests = []types.RPCRequest{request} } + // Set the default response cache to true unless + // 1. Any RPC request error. + // 2. Any RPC request doesn't allow to be cached. + // 3. Any RPC request has the height argument and the value is 0 (the default). 
+ cache := true for _, request := range requests { request := request @@ -72,11 +77,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han responses, types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), ) + cache = false continue } rpcFunc, ok := funcMap[request.Method] - if !ok || rpcFunc.ws { + if !ok || (rpcFunc.ws) { responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + cache = false continue } ctx := &types.Context{JSONReq: &request, HTTPReq: r} @@ -88,11 +95,16 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han responses, types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ) + cache = false continue } args = append(args, fnArgs...) } + if cache && !rpcFunc.cacheableWithArgs(args) { + cache = false + } + returns := rpcFunc.f.Call(args) result, err := unreflectResult(returns) if err != nil { @@ -103,7 +115,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han } if len(responses) > 0 { - if wErr := WriteRPCResponseHTTP(w, responses...); wErr != nil { + var wErr error + if cache { + wErr = WriteCacheableRPCResponseHTTP(w, responses...) + } else { + wErr = WriteRPCResponseHTTP(w, responses...) 
+ } + if wErr != nil { logger.Error("failed to write responses", "res", responses, "err", wErr) } } @@ -128,7 +146,6 @@ func mapParamsToArgs( params map[string]json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.argNames)) for i, argName := range rpcFunc.argNames { argType := rpcFunc.args[i+argsOffset] @@ -153,7 +170,6 @@ func arrayParamsToArgs( params []json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { - if len(rpcFunc.argNames) != len(params) { return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index fbcb470c0..44caedd3e 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -18,7 +18,8 @@ import ( func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", Cacheable("height")), } mux := http.NewServeMux() buf := new(bytes.Buffer) @@ -227,3 +228,52 @@ func TestUnknownRPCPath(t *testing.T) { require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404") res.Body.Close() } + +func TestRPCResponseCache(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`) + req, _ := http.NewRequest("Get", "http://localhost/", body) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "public, max-age=86400", 
res.Header.Get("Cache-control")) + + _, err := io.ReadAll(res.Body) + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height. + body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height, but as empty set of parameters. + body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") +} diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 6eaa0ab93..617e1bbdc 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -117,6 +117,22 @@ func WriteRPCResponseHTTPError( // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{}, res...) +} + +// WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes +// it to w. 
Adds cache-control to the response header and sets the expiry to +// one day. +func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...) +} + +type httpHeader struct { + name string + value string +} + +func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error { var v interface{} if len(res) == 1 { v = res[0] @@ -129,6 +145,9 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error return fmt.Errorf("json marshal: %w", err) } w.Header().Set("Content-Type", "application/json") + for _, header := range headers { + w.Header().Set(header.name, header.value) + } w.WriteHeader(200) _, err = w.Write(jsonBytes) return err @@ -166,7 +185,6 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // Without this, Chrome & Firefox were retrying aborted ajax requests, // at least to my localhost. 
if e := recover(); e != nil { - // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 6e2024b8d..e1c499200 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -112,7 +112,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + err := WriteCacheableRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) resp := w.Result() body, err := io.ReadAll(resp.Body) @@ -120,6 +120,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control")) assert.Equal(t, `{ "jsonrpc": "2.0", "id": -1, diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 6609cb837..e99a1b0ac 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -63,7 +63,14 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit } return } - if err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(dummyID, result)); err != nil { + + resp := types.NewRPCSuccessResponse(dummyID, result) + if rpcFunc.cacheableWithArgs(args) { + err = WriteCacheableRPCResponseHTTP(w, resp) + } else { + err = WriteRPCResponseHTTP(w, resp) + } + if err != nil { logger.Error("failed to write response", "res", result, "err", err) return } diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index e5855c314..8a5053666 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -23,40 +23,96 @@ func 
RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) } -// Function introspection +type Option func(*RPCFunc) + +// Cacheable enables returning a cache control header from RPC functions to +// which it is applied. +// +// `noCacheDefArgs` is a list of argument names that, if omitted or set to +// their defaults when calling the RPC function, will skip the response +// caching. +func Cacheable(noCacheDefArgs ...string) Option { + return func(r *RPCFunc) { + r.cacheable = true + r.noCacheDefArgs = make(map[string]interface{}) + for _, arg := range noCacheDefArgs { + r.noCacheDefArgs[arg] = nil + } + } +} + +// Ws enables WebSocket communication. +func Ws() Option { + return func(r *RPCFunc) { + r.ws = true + } +} // RPCFunc contains the introspected type information for a function type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - ws bool // websocket only + f reflect.Value // underlying rpc function + args []reflect.Type // type of each function arg + returns []reflect.Type // type of each return arg + argNames []string // name of each argument + cacheable bool // enable cache control + ws bool // enable websocket communication + noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache } // NewRPCFunc wraps a function for introspection. // f is the function, args are comma separated argument names -func NewRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, false) +func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { + return newRPCFunc(f, args, options...) } // NewWSRPCFunc wraps a function for introspection and use in the websockets. 
-func NewWSRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, true) +func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { + options = append(options, Ws()) + return newRPCFunc(f, args, options...) } -func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc { +// cacheableWithArgs returns whether or not a call to this function is cacheable, +// given the specified arguments. +func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool { + if !f.cacheable { + return false + } + // Skip the context variable common to all RPC functions + for i := 1; i < len(f.args); i++ { + // f.argNames does not include the context variable + argName := f.argNames[i-1] + if _, hasDefault := f.noCacheDefArgs[argName]; hasDefault { + // Argument with default value was not supplied + if i >= len(args) { + return false + } + // Argument with default value is set to its zero value + if args[i].IsZero() { + return false + } + } + } + return true +} + +func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { var argNames []string if args != "" { argNames = strings.Split(args, ",") } - return &RPCFunc{ + + r := &RPCFunc{ f: reflect.ValueOf(f), args: funcArgTypes(f), returns: funcReturnTypes(f), argNames: argNames, - ws: ws, } + + for _, opt := range options { + opt(r) + } + + return r } // return a function's argument types diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 4d5feea44..510cf39d1 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -216,6 +216,9 @@ paths: Please refer to https://docs.tendermint.com/main/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. parameters: - in: query name: tx @@ -621,9 +624,12 @@ paths: tags: - Info description: | - Get block headers for minHeight <= height maxHeight. + Get block headers for minHeight <= height <= maxHeight. 
At most 20 items will be returned. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Block headers, returned in descending order (highest first). @@ -653,6 +659,9 @@ paths: - Info description: | Get Header. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Header informations. @@ -682,6 +691,9 @@ paths: - Info description: | Get Header By Hash. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Header informations. @@ -711,6 +723,9 @@ paths: - Info description: | Get Block. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Block informations. @@ -740,6 +755,9 @@ paths: - Info description: | Get Block By Hash. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Block informations. @@ -760,7 +778,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch informations regarding the latest block. + description: height to return. If no height is provided, it will fetch information regarding the latest block. schema: type: integer default: 0 @@ -769,6 +787,9 @@ paths: - Info description: | Get block_results. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Block results. @@ -798,6 +819,9 @@ paths: - Info description: | Get Commit. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. 
responses: "200": description: | @@ -845,7 +869,11 @@ paths: tags: - Info description: | - Get Validators. Validators are sorted first by voting power (descending), then by address (ascending). + Get Validators. Validators are sorted first by voting power + (descending), then by address (ascending). + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Commit results. @@ -867,6 +895,9 @@ paths: - Info description: | Get genesis. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Genesis results. @@ -945,6 +976,9 @@ paths: - Info description: | Get consensus parameters. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: consensus parameters results. @@ -1135,14 +1169,14 @@ paths: parameters: - in: query name: hash - description: transaction Hash to retrive + description: hash of transaction to retrieve required: true schema: type: string example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" - in: query name: prove - description: Include proofs of the transactions inclusion in the block + description: Include proofs of the transaction's inclusion in the block required: false schema: type: boolean @@ -1151,7 +1185,10 @@ paths: tags: - Info description: | - Get a trasasction + Get a transaction + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Get a transaction` @@ -1167,12 +1204,15 @@ paths: $ref: "#/components/schemas/ErrorResponse" /abci_info: get: - summary: Get some info about the application. + summary: Get info about the application. operationId: abci_info tags: - ABCI description: | - Get some info about the application. + Get info about the application. 
+ + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Get some info about the application. diff --git a/scripts/qa/reporting/README.md b/scripts/qa/reporting/README.md new file mode 100644 index 000000000..088332837 --- /dev/null +++ b/scripts/qa/reporting/README.md @@ -0,0 +1,48 @@ +# Reporting Scripts + +This directory contains just one utility script at present that is used in +reporting/QA. + +## Latency vs Throughput Plotting + +[`latency_throughput.py`](./latency_throughput.py) is a Python script that uses +[matplotlib] to plot a graph of transaction latency vs throughput rate based on +the CSV output generated by the [loadtime reporting +tool](../../../test/loadtime/cmd/report/). + +### Setup + +Execute the following within this directory (the same directory as the +`latency_throughput.py` file). + +```bash +# Create a virtual environment into which to install your dependencies +python3 -m venv .venv + +# Activate the virtual environment +source .venv/bin/activate + +# Install dependencies listed in requirements.txt +pip install -r requirements.txt + +# Show usage instructions and parameters +./latency_throughput.py --help +``` + +### Running + +```bash +# Do the following while ensuring that the virtual environment is activated (see +# the Setup steps). +# +# This will generate a plot in a PNG file called 'tm034.png' in the current +# directory based on the reporting tool CSV output in the "raw.csv" file. The +# '-t' flag overrides the default title at the top of the plot. 
+ +./latency_throughput.py \ + -t 'Tendermint v0.34.x Latency vs Throughput' \ + ./tm034.png \ + /path/to/csv/files/raw.csv +``` + +[matplotlib]: https://matplotlib.org/ diff --git a/scripts/qa/reporting/latency_throughput.py b/scripts/qa/reporting/latency_throughput.py new file mode 100755 index 000000000..2cdab72ac --- /dev/null +++ b/scripts/qa/reporting/latency_throughput.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +A simple script to parse the CSV output from the loadtime reporting tool (see +https://github.com/tendermint/tendermint/tree/main/test/loadtime/cmd/report). + +Produces a plot of average transaction latency vs total transaction throughput +according to the number of load testing tool WebSocket connections to the +Tendermint node. +""" + +import argparse +import csv +import logging +import sys +import matplotlib.pyplot as plt +import numpy as np + +DEFAULT_TITLE = "Tendermint latency vs throughput" + + +def main(): + parser = argparse.ArgumentParser( + description="Renders a latency vs throughput diagram " + "for a set of transactions provided by the loadtime reporting tool", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-t', + '--title', + default=DEFAULT_TITLE, + help='Plot title') + parser.add_argument('output_image', + help='Output image file (in PNG format)') + parser.add_argument( + 'input_csv_file', + nargs='+', + help="CSV input file from which to read transaction data " + "- must have been generated by the loadtime reporting tool") + args = parser.parse_args() + + logging.basicConfig(format='%(levelname)s\t%(message)s', + stream=sys.stdout, + level=logging.INFO) + plot_latency_vs_throughput(args.input_csv_file, + args.output_image, + title=args.title) + + +def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): + avg_latencies, throughput_rates = process_input_files(input_files, ) + + fig, ax = plt.subplots() + + connections = sorted(avg_latencies.keys()) + for c in 
connections: + tr = np.array(throughput_rates[c]) + al = np.array(avg_latencies[c]) + label = '%d connection%s' % (c, '' if c == 1 else 's') + ax.plot(tr, al, 'o-', label=label) + + ax.set_title(title) + ax.set_xlabel('Throughput rate (tx/s)') + ax.set_ylabel('Average transaction latency (s)') + + plt.legend(loc='upper left') + plt.savefig(output_image) + + +def process_input_files(input_files): + # Experimental data from which we will derive the latency vs throughput + # statistics + experiments = {} + + for input_file in input_files: + logging.info('Reading %s...' % input_file) + + with open(input_file, 'rt') as inf: + reader = csv.DictReader(inf) + for tx in reader: + experiments = process_tx(experiments, tx) + + return compute_experiments_stats(experiments) + + +def process_tx(experiments, tx): + exp_id = tx['experiment_id'] + # Block time is nanoseconds from the epoch - convert to seconds + block_time = float(tx['block_time']) / (10**9) + # Duration is also in nanoseconds - convert to seconds + duration = float(tx['duration_ns']) / (10**9) + connections = int(tx['connections']) + rate = int(tx['rate']) + + if exp_id not in experiments: + experiments[exp_id] = { + 'connections': connections, + 'rate': rate, + 'block_time_min': block_time, + # We keep track of the latency associated with the minimum block + # time to estimate the start time of the experiment + 'block_time_min_duration': duration, + 'block_time_max': block_time, + 'total_latencies': duration, + 'tx_count': 1, + } + logging.info('Found experiment %s with rate=%d, connections=%d' % + (exp_id, rate, connections)) + else: + # Validation + for field in ['connections', 'rate']: + val = int(tx[field]) + if val != experiments[exp_id][field]: + raise Exception( + 'Found multiple distinct values for field ' + '"%s" for the same experiment (%s): %d and %d' % + (field, exp_id, val, experiments[exp_id][field])) + + if block_time < experiments[exp_id]['block_time_min']: + experiments[exp_id]['block_time_min'] 
= block_time + experiments[exp_id]['block_time_min_duration'] = duration + if block_time > experiments[exp_id]['block_time_max']: + experiments[exp_id]['block_time_max'] = block_time + + experiments[exp_id]['total_latencies'] += duration + experiments[exp_id]['tx_count'] += 1 + + return experiments + + +def compute_experiments_stats(experiments): + """Compute average latency vs throughput rate statistics from the given + experiments""" + stats = {} + + # Compute average latency and throughput rate for each experiment + for exp_id, exp in experiments.items(): + conns = exp['connections'] + avg_latency = exp['total_latencies'] / exp['tx_count'] + exp_start_time = exp['block_time_min'] - exp['block_time_min_duration'] + exp_duration = exp['block_time_max'] - exp_start_time + throughput_rate = exp['tx_count'] / exp_duration + if conns not in stats: + stats[conns] = [] + + stats[conns].append({ + 'avg_latency': avg_latency, + 'throughput_rate': throughput_rate, + }) + + # Sort stats for each number of connections in order of increasing + # throughput rate, and then extract average latencies and throughput rates + # as separate data series. 
+ conns = sorted(stats.keys()) + avg_latencies = {} + throughput_rates = {} + for c in conns: + stats[c] = sorted(stats[c], key=lambda s: s['throughput_rate']) + avg_latencies[c] = [] + throughput_rates[c] = [] + for s in stats[c]: + avg_latencies[c].append(s['avg_latency']) + throughput_rates[c].append(s['throughput_rate']) + logging.info('For %d connection(s): ' + 'throughput rate = %.6f tx/s\t' + 'average latency = %.6fs' % + (c, s['throughput_rate'], s['avg_latency'])) + + return (avg_latencies, throughput_rates) + + +if __name__ == "__main__": + main() diff --git a/scripts/qa/reporting/requirements.txt b/scripts/qa/reporting/requirements.txt new file mode 100644 index 000000000..6c6fb0097 --- /dev/null +++ b/scripts/qa/reporting/requirements.txt @@ -0,0 +1,11 @@ +contourpy==1.0.5 +cycler==0.11.0 +fonttools==4.37.4 +kiwisolver==1.4.4 +matplotlib==3.6.1 +numpy==1.23.4 +packaging==21.3 +Pillow==9.3.0 +pyparsing==3.0.9 +python-dateutil==2.8.2 +six==1.16.0 diff --git a/spec/abci/abci++_basic_concepts.md b/spec/abci/abci++_basic_concepts.md index 06b44b834..2ceb3fd23 100644 --- a/spec/abci/abci++_basic_concepts.md +++ b/spec/abci/abci++_basic_concepts.md @@ -82,7 +82,8 @@ call sequences of these methods. been locked at Tendermint level. Tendermint gathers outstanding transactions from the mempool, generates a block header, and uses them to create a block to propose. Then, it calls `RequestPrepareProposal` with the newly created proposal, called *raw proposal*. The Application - can make changes to the raw proposal, such as modifying transactions, and returns the + can make changes to the raw proposal, such as modifying the set of transactions or the order + in which they appear, and returns the (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` call. The logic modifying the raw proposal can be non-deterministic. @@ -109,9 +110,9 @@ call sequences of these methods. 
returned by `DeliverTx` are included in the header of the next block. - [**EndBlock**](./abci++_methods.md#endblock) It is executed once all transactions have been processed via - `DeliverTx` to inform the application that the block can now be committed and inform it of potential changes such - as a new validator set to be proposed in the next round. As with `DeliverTx`, cryptographic commitments of the responses returned - are included in the header of the next block. + `DeliverTx` to inform the application that no other transactions will be delivered as part of the current + block and to ask for changes to the validator set and consensus parameters to be used in the following block. + As with `DeliverTx`, cryptographic commitments of the responses returned are included in the header of the next block.