diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 30218e098..7993419e8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,5 +7,4 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield - +* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 12dd504e3..7d312b4f8 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -121,7 +121,7 @@ jobs: - run: | cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v2.0.2 + - uses: codecov/codecov-action@v2.0.3 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 009f16898..89797a581 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -50,7 +50,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.6.1 + uses: docker/build-push-action@v2.7.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml index 8dc612602..ed31025b9 100644 --- a/.github/workflows/proto-docker.yml +++ b/.github/workflows/proto-docker.yml @@ -43,7 +43,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.6.1 + uses: docker/build-push-action@v2.7.0 with: context: ./tools/proto file: ./tools/proto/Dockerfile diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 771b8ab7c..dd18e750b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: "Release" on: push: - branches: + branches: - "RC[0-9]/**" tags: - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 @@ -20,9 +20,6 @@ jobs: with: go-version: '1.16' - - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md - if: startsWith(github.ref, 'refs/tags/') - - name: Build uses: goreleaser/goreleaser-action@v2 if: ${{ github.event_name == 'pull_request' }} @@ -35,6 +32,6 @@ jobs: if: startsWith(github.ref, 'refs/tags/') with: version: latest - args: release --rm-dist --release-notes=../release_notes.md + args: release --rm-dist env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.golangci.yml b/.golangci.yml index f05cde90c..574ed22b0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -26,7 +26,7 @@ linters: # - maligned - nakedret - prealloc - - scopelint + - exportloopref - staticcheck - structcheck - stylecheck diff --git a/CHANGELOG.md b/CHANGELOG.md index cbda6a678..df18653c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,27 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). +## v0.34.12 + +*August 17, 2021* + +Special thanks to external contributors on this release: @JayT106. 
+ +### FEATURES + +- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce + `/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish) + +### IMPROVEMENTS + +- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez) + +### BUG FIXES + +- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug + with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters) +- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106) + ## v0.34.11 *June 18, 2021* @@ -12,25 +33,25 @@ adding two new parameters to the state sync config. ### BREAKING CHANGES - Apps - - [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer. + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer. ### IMPROVEMENTS -- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) -- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish) -- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters) +- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) +- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish) +- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters) ### BUG FIXES -- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters) ## v0.34.10 *April 14, 2021* -This release fixes a bug where peers would sometimes try to send messages +This release fixes a bug where peers would sometimes try to send messages on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing -this issue! +this issue! - [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters) - [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters) @@ -39,7 +60,7 @@ this issue! *April 8, 2021* -This release fixes a moderate severity security issue, Security Advisory Alderfly, +This release fixes a moderate severity security issue, Security Advisory Alderfly, which impacts all networks that rely on Tendermint light clients. Further details will be released once networks have upgraded. @@ -112,7 +133,7 @@ shout-out to @marbar3778 for diagnosing it quickly. ## v0.34.6 -*February 18, 2021* +*February 18, 2021* _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._ @@ -120,9 +141,9 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p *February 11, 2021* -This release includes a fix for a memory leak in the evidence reactor (see #6068, below). -All Tendermint clients are recommended to upgrade. 
-Thank you to our friends at Crypto.com for the initial report of this memory leak! +This release includes a fix for a memory leak in the evidence reactor (see #6068, below). +All Tendermint clients are recommended to upgrade. +Thank you to our friends at Crypto.com for the initial report of this memory leak! Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3! @@ -132,17 +153,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid - [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters) - [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes) -## v0.34.3 +## v0.34.3 *January 19, 2021* -This release includes a fix for a high-severity security vulnerability, +This release includes a fix for a high-severity security vulnerability, a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see -[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) -or https://nvd.nist.gov/vuln/detail/CVE-2021-21271. +[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) +or https://nvd.nist.gov/vuln/detail/CVE-2021-21271. Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for -https://nvd.nist.gov/vuln/detail/CVE-2021-3121. +https://nvd.nist.gov/vuln/detail/CVE-2021-3121. ### BUG FIXES @@ -234,14 +255,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778) - [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778) - [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778) - - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) + - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) - `MaxBatchBytes` new config setting defines the max size of one batch. 
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778) - [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778) - Blockchain Protocol - - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) + - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters) - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker) - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes) @@ -300,7 +321,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778) - [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) - [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes) - - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) + - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778) @@ -338,7 +359,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters) - [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters) - [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters) -- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778) +- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778) - [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes) - [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare 
header with witnesses only when doing bisection (@melekes) - [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9de5b8bcb..6c25ef89e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -25,7 +25,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated. - [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0 - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - + - [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) + - [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] \#6854 Remove deprecated snake case commands. (@tychoish) - Apps - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface @@ -33,7 +35,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`. - - It is not required any longer to set ldflags to set version strings + - It is not required any longer to set ldflags to set version strings - [abci/counter] \#6684 Delete counter example app - P2P Protocol @@ -56,25 +58,25 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters) - [state] \#5864 Use an iterator when pruning state (@cmwaters) - [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. - - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. + - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. - [light] \#6054 Move `MaxRetryAttempt` option from client to provider. 
- - `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) + - `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) - [all] \#6077 Change spelling from British English to American (@cmwaters) - - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub - - Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2 + - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub + - Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2 - [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) - [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes) - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder - [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. - Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself - and `TxInfo`. (@alexanderbez) + Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself + and `TxInfo`. (@alexanderbez) - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) - - [types] \#6627 Move `NodeKey` to types to make the type public. + - [types] \#6627 Move `NodeKey` to types to make the type public. - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` - (@cmwaters) + (@cmwaters) - Blockchain Protocol @@ -105,8 +107,10 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) - [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106) - [statesync/event] \#6700 Emit statesync status start/end event (@JayT106) +- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) ### IMPROVEMENTS + - [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish) - [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. 
(@alexanderbez) - [types] \#6478 Add `block_id` to `newblock` event (@jeebster) @@ -154,8 +158,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes) - [blockchain/v1] \#5711 Fix deadlock (@melekes) - [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) -- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106) +- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106) - [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) - [fastsync] \#6590 Update the metrics during fast-sync (@JayT106) - [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman) -- [light] \#6687 Fix bug with incorrecly handled contexts in the light client (@cmwaters) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a8bc8aa8f..23bfafcdf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -227,16 +227,96 @@ Fixes #nnnn Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! -### Release Procedure +### Release procedure -#### Major Release +#### A note about backport branches +Tendermint's `master` branch is under active development. +Releases are specified using tags and are built from long-lived "backport" branches. +Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, +and the backport branches have names like `v0.34.x` or `v0.33.x` +(literally, `x`; it is not a placeholder in this case). + +As non-breaking changes land on `master`, they should also be backported (cherry-picked) +to these backport branches. + +We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport +to the needed branch. There should be a label for any backport branch that you'll be targeting. +To notify the bot to backport a pull request, mark the pull request with +the label `S:backport-to-`. +Once the original pull request is merged, the bot will try to cherry-pick the pull request +to the backport branch. If the bot fails to backport, it will open a pull request. +The author of the original pull request is responsible for solving the conflicts and +merging the pull request. + +#### Creating a backport branch +If this is the first release candidate for a major release, you get to have the honor of creating +the backport branch! + +Note that, after creating the backport branch, you'll also need to update the tags on `master` +so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag +that is "greater than" the backport branches tags. See #6072 for more context. + +In the following example, we'll assume that we're making a backport branch for +the 0.35.x line. + +1. Start on `master` +2. Create the backport branch: + `git checkout -b v0.35.x` +3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: + `git tag -a v0.36.0-dev; git push v0.36.0-dev` +4. Create a new workflow to run the e2e nightlies for this backport branch. + (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml + for an example.) 
+ +#### Release candidates + +Before creating an official release, especially a major release, we may want to create a +release candidate (RC) for our friends and partners to test out. We use git tags to +create RCs, and we build them off of backport branches. + +Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end +(for example, `v0.35.0-rc0`). + +(Note that branches and tags _cannot_ have the same names, so it's important that these branches +have distinct names from the tags/release names.) + +If this is the first RC for a major release, you'll have to make a new backport branch (see above). +Otherwise: + +1. Start from the backport branch (e.g. `v0.35.x`). +1. Run the integration tests and the e2e nightlies + (which can be triggered from the Github UI; + e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). +1. Prepare the changelog: + - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump TMVersionDefault version in `version.go` + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary +1. Open a PR with these changes against the backport branch. +1. Once these changes have landed on the backport branch, be sure to pull them back down locally. +2. Once you have the changes locally, create the new tag, specifying a name and a tag "message": + `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0` +3. Push the tag back up to origin: + `git push origin v0.35.0-rc0` + Now the tag should be available on the repo's releases page. +4. Future RCs will continue to be built off of this branch. + +Note that this process should only be used for "true" RCs-- +release candidates that, if successful, will be the next release. +For more experimental "RCs," create a new, short-lived branch and tag that instead. + +#### Major release This major release process assumes that this release was preceded by release candidates. -If there were no release candidates, and you'd like to cut a major release directly from master, see below. +If there were no release candidates, begin by creating a backport branch, as described above. -1. Start on the latest RC branch (`RCx/vX.X.0`). -2. Run integration tests. -3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release: +1. Start on the backport branch (e.g. `v0.35.x`) +2. Run integration tests and the e2e nightlies. +3. Prepare the release: - "Squash" changes from the changelog entries for the RCs into a single entry, and add all changes included in `CHANGELOG_PENDING.md`. (Squashing includes both combining all entries, as well as removing or simplifying @@ -248,58 +328,24 @@ If there were no release candidates, and you'd like to cut a major release direc - Bump TMVersionDefault version in `version.go` - Bump P2P and block protocol versions in `version.go`, if necessary - Bump ABCI protocol version in `version.go`, if necessary - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`). -5. 
Once these changes are on the RC branch, branch off of the RC branch again to create a release branch: - - `git checkout RCx/vX.X.0` - - `git checkout -b release/vX.X.0` -6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`. - - `git tag -a vX.X.0 -m 'Release vX.X.0'` - - `git push origin vX.X.0` +4. Open a PR with these changes against the backport branch. +5. Once these changes are on the backport branch, push a tag with prepared release details. + This will trigger the actual release `v0.35.0`. + - `git tag -a v0.35.0 -m 'Release v0.35.0'` + - `git push origin v0.35.0` 7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. -8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this - new major release series. -##### Major Release (from `master`) - -1. Start on `master` -2. Run integration tests (see `test_integrations` in Makefile) -3. Prepare release in a pull request against `master` (to be squash merged): - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release - had release candidates, squash all the RC updates into one - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest - release, and add the github aliases of external contributors to the top of - the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary - - Make sure all significant breaking changes are covered in `UPGRADING.md` - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Push a tag with prepared release details (this will trigger the release `vX.X.0`) - - `git tag -a vX.X.x -m 'Release vX.X.x'` - - `git push origin vX.X.x` -5. Update the `CHANGELOG.md` file on master with the releases changelog. -6. Delete any RC branches and tags for this release (if applicable) - -#### Minor Release (Point Releases) +#### Minor release (point releases) Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. -Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and -the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case). - As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. -We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request. - Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. To create a minor release: -1. 
Checkout the long-lived backport branch: `git checkout vX.X.x` -2. Run integration tests: `make test_integrations` +1. Checkout the long-lived backport branch: `git checkout v0.35.x` +2. Run integration tests (`make test_integrations`) and the nightlies. 3. Check out a new branch and prepare the release: - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues @@ -308,35 +354,14 @@ To create a minor release: - Bump the ABCI version number, if necessary. (Note that ABCI follows semver, and that ABCI versions are the only versions which can change during minor releases, and only field additions are valid minor changes.) - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Open a PR with these changes that will land them back on `vX.X.x` +4. Open a PR with these changes that will land them back on `v0.35.x` 5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - - `git tag -a vX.X.x -m 'Release vX.X.x'` - - `git push origin vX.X.x` + - `git tag -a v0.35.1 -m 'Release v0.35.1'` + - `git push origin v0.35.1` 6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - Remove all `R:minor` labels from the pull requests that were included in the release. - Do not merge the backport branch into master. -#### Release Candidates - -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to -create RCs, and we build them off of RC branches. RC branches typically have names formatted -like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow -the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`). - -(Note that branches and tags _cannot_ have the same names, so it's important that these branches -have distinct names from the tags/release names.) - -1. Start from the RC branch (e.g. `RC0/v0.34.0`). -2. Create the new tag, specifying a name and a tag "message": - `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0` -3. Push the tag back up to origin: - `git push origin v0.34.0-rc4` - Now the tag should be available on the repo's releases page. -4. Create a new release candidate branch for any possible updates to the RC: - `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0` - ## Testing ### Unit tests diff --git a/UPGRADING.md b/UPGRADING.md index ef0a29fb4..9d1a426ea 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -24,10 +24,10 @@ This guide provides instructions for upgrading to specific versions of Tendermin * Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`) [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) - + * `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike - persistent peers, there's no guarantee that the node will remain connected with these peers. + persistent peers, there's no gaurantee that the node will remain connected with these peers. * configuration values starting with `priv-validator-` have moved to the new `priv-validator` section, without the `priv-validator-` prefix. 
@@ -35,6 +35,27 @@ This guide provides instructions for upgrading to specific versions of Tendermin * The fast sync process as well as the blockchain package and service has all been renamed to block sync +### Key Format Changes + +The format of all tendermint on-disk database keys changes in +0.35. Upgrading nodes must either re-sync all data or run a migration +script provided in this release. The script located in +`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` +provides the function `Migrate(context.Context, db.DB)` which you can +operationalize as makes sense for your deployment. + +For ease of use the `tendermint` command includes a CLI version of the +migration script, which you can invoke, as in: + + tendermint key-migrate + +This reads the configuration file as normal and allows the +`--db-backend` and `--db-dir` flags to change database operations as +needed. + +The migration operation is idempotent and can be run more than once, +if needed. + ### CLI Changes * You must now specify the node mode (validator|full|seed) in `tendermint init [mode]` @@ -66,7 +87,7 @@ are: - `blockchain` - `evidence` -Accordingly, the space `node` package was changed to reduce access to +Accordingly, the `node` package was changed to reduce access to tendermint internals: applications that use tendermint as a library will need to change to accommodate these changes. Most notably: @@ -79,8 +100,32 @@ will need to change to accommodate these changes. Most notably: ### RPC changes +#### gRPC Support + Mark gRPC in the RPC layer as deprecated and to be removed in 0.36. +#### Peer Management Interface + +When running with the new P2P Layer, the methods `UnsafeDialSeeds` and +`UnsafeDialPeers` RPC methods will always return an error. They are +deprecated and will be removed in 0.36 when the legacy peer stack is +removed. + +Additionally the format of the Peer list returned in the `NetInfo` +method changes in this release to accommodate the different way that +the new stack tracks data about peers. This change affects users of +both stacks. + +### Support for Custom Reactor and Mempool Implementations + +The changes to p2p layer removed existing support for custom +reactors. Based on our understanding of how this functionality was +used, the introduction of the prioritized mempool covers nearly all of +the use cases for custom reactors. If you are currently running custom +reactors and mempools and are having trouble seeing the migration path +for your project please feel free to reach out to the Tendermint Core +development team directly. + ## v0.34.0 **Upgrading to Tendermint 0.34 requires a blockchain restart.** @@ -234,8 +279,8 @@ Other user-relevant changes include: * The old `lite` package was removed; the new light client uses the `light` package. * The `Verifier` was broken up into two pieces: - * Core verification logic (pure `VerifyX` functions) - * `Client` object, which represents the complete light client + * Core verification logic (pure `VerifyX` functions) + * `Client` object, which represents the complete light client * The new light clients stores headers & validator sets as `LightBlock`s * The RPC client can be found in the `/rpc` directory. * The HTTP(S) proxy is located in the `/proxy` directory. @@ -367,12 +412,12 @@ Evidence Params has been changed to include duration. ### Go API * `libs/common` has been removed in favor of specific pkgs. 
- * `async` - * `service` - * `rand` - * `net` - * `strings` - * `cmap` + * `async` + * `service` + * `rand` + * `net` + * `strings` + * `cmap` * removal of `errors` pkg ### RPC Changes @@ -441,9 +486,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like: ```go abci.ResponseDeliverTx{ Tags: []kv.Pair{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, } } ``` @@ -462,14 +507,14 @@ the following `Events`: ```go abci.ResponseDeliverTx{ Events: []abci.Event{ - { - Type: "transfer", - Attributes: kv.Pairs{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, - }, - } + { + Type: "transfer", + Attributes: kv.Pairs{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, + }, + } } ``` @@ -517,9 +562,9 @@ In this case, the WS client will receive an error with description: "jsonrpc": "2.0", "id": "{ID}#event", "error": { - "code": -32000, - "msg": "Server error", - "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" + "code": -32000, + "msg": "Server error", + "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" } } @@ -725,9 +770,9 @@ just the `Data` field set: ```go []ProofOp{ - ProofOp{ - Data: , - } + ProofOp{ + Data: , + } } ``` diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index f796f4b7f..d8b493e3c 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -12,11 +12,9 @@ import ( // GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded // NodeKey to the standard output. var GenNodeKeyCmd = &cobra.Command{ - Use: "gen-node-key", - Aliases: []string{"gen_node_key"}, - Short: "Generate a new node key", - RunE: genNodeKey, - PreRun: deprecateSnakeCase, + Use: "gen-node-key", + Short: "Generate a new node key", + RunE: genNodeKey, } func genNodeKey(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 09f84b09e..830518ce9 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -13,11 +13,9 @@ import ( // GenValidatorCmd allows the generation of a keypair for a // validator. 
var GenValidatorCmd = &cobra.Command{ - Use: "gen-validator", - Aliases: []string{"gen_validator"}, - Short: "Generate new validator keypair", - RunE: genValidator, - PreRun: deprecateSnakeCase, + Use: "gen-validator", + Short: "Generate new validator keypair", + RunE: genValidator, } func init() { diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go new file mode 100644 index 000000000..de31d33d4 --- /dev/null +++ b/cmd/tendermint/commands/inspect.go @@ -0,0 +1,87 @@ +package commands + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer/sink" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +// InspectCmd is the command for starting an inspect server. +var InspectCmd = &cobra.Command{ + Use: "inspect", + Short: "Run an inspect server for investigating Tendermint state", + Long: ` + inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging + issues with Tendermint. + + When the Tendermint consensus engine detects inconsistent state, it will crash the + tendermint process. Tendermint will not start up while in this inconsistent state. + The inspect command can be used to query the block and state store using Tendermint + RPC calls to debug issues of inconsistent state. + `, + + RunE: runInspect, +} + +func init() { + InspectCmd.Flags(). + String("rpc.laddr", + config.RPC.ListenAddress, "RPC listenener address. Port required") + InspectCmd.Flags(). + String("db-backend", + config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + InspectCmd.Flags(). 
+ String("db-dir", config.DBPath, "database directory") +} + +func runInspect(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) + go func() { + <-c + cancel() + }() + + blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config}) + if err != nil { + return err + } + blockStore := store.NewBlockStore(blockStoreDB) + stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config}) + if err != nil { + if err := blockStoreDB.Close(); err != nil { + logger.Error("error closing block store db", "error", err) + } + return err + } + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return err + } + sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID) + if err != nil { + return err + } + stateStore := state.NewStore(stateDB) + + ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger) + + logger.Info("starting inspect server") + if err := ins.Run(ctx); err != nil { + return err + } + return nil +} diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go new file mode 100644 index 000000000..739af4a7d --- /dev/null +++ b/cmd/tendermint/commands/key_migrate.go @@ -0,0 +1,64 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/scripts/keymigrate" +) + +func MakeKeyMigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "key-migrate", + Short: "Run Database key migration", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + contexts := []string{ + // this is ordered to put the + // (presumably) biggest/most important + // subsets first. + "blockstore", + "state", + "peerstore", + "tx_index", + "evidence", + "light", + } + + for idx, dbctx := range contexts { + logger.Info("beginning a key migration", + "dbctx", dbctx, + "num", idx+1, + "total", len(contexts), + ) + + db, err := cfg.DefaultDBProvider(&cfg.DBContext{ + ID: dbctx, + Config: config, + }) + + if err != nil { + return fmt.Errorf("constructing database handle: %w", err) + } + + if err = keymigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running migration for context %q: %w", + dbctx, err) + } + } + + logger.Info("completed database migration successfully") + + return nil + }, + } + + // allow database info to be overridden via cli + addDBFlags(cmd) + + return cmd +} diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go index 4471024f9..4c71e099a 100644 --- a/cmd/tendermint/commands/probe_upnp.go +++ b/cmd/tendermint/commands/probe_upnp.go @@ -11,11 +11,9 @@ import ( // ProbeUpnpCmd adds capabilities to test the UPnP functionality. 
var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Aliases: []string{"probe_upnp"}, - Short: "Test UPnP functionality", - RunE: probeUpnp, - PreRun: deprecateSnakeCase, + Use: "probe-upnp", + Short: "Test UPnP functionality", + RunE: probeUpnp, } func probeUpnp(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index ddc585c1f..1dbce2f74 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -31,7 +31,7 @@ var ReIndexEventCmd = &cobra.Command{ Long: ` reindex-event is an offline tooling to re-index block and tx events to the eventsinks, you can run this command when the event store backend dropped/disconnected or you want to replace the backend. - The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the + The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits either or both arguments. `, @@ -106,7 +106,7 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { if conn == "" { return nil, errors.New("the psql connection settings cannot be empty") } - es, _, err := psql.NewEventSink(conn, chainID) + es, err := psql.NewEventSink(conn, chainID) if err != nil { return nil, err } diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index 6e736bca2..e92274042 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -17,11 +17,9 @@ var ReplayCmd = &cobra.Command{ // ReplayConsoleCmd allows replaying of messages from the WAL in a // console. var ReplayConsoleCmd = &cobra.Command{ - Use: "replay-console", - Aliases: []string{"replay_console"}, - Short: "Replay messages from WAL in a console", + Use: "replay-console", + Short: "Replay messages from WAL in a console", Run: func(cmd *cobra.Command, args []string) { consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) }, - PreRun: deprecateSnakeCase, } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 046780ef1..8745e55d8 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -14,11 +14,9 @@ import ( // ResetAllCmd removes the database of this Tendermint core // instance. var ResetAllCmd = &cobra.Command{ - Use: "unsafe-reset-all", - Aliases: []string{"unsafe_reset_all"}, - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - RunE: resetAll, - PreRun: deprecateSnakeCase, + Use: "unsafe-reset-all", + Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", + RunE: resetAll, } var keepAddrBook bool @@ -31,11 +29,9 @@ func init() { // ResetPrivValidatorCmd resets the private validator files. var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe-reset-priv-validator", - Aliases: []string{"unsafe_reset_priv_validator"}, - Short: "(unsafe) Reset this node's validator to genesis state", - RunE: resetPrivValidator, - PreRun: deprecateSnakeCase, + Use: "unsafe-reset-priv-validator", + Short: "(unsafe) Reset this node's validator to genesis state", + RunE: resetPrivValidator, } // XXX: this is totally unsafe. 
diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 02f260de5..2289ae363 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "strings" "time" "github.com/spf13/cobra" @@ -65,10 +64,3 @@ var RootCmd = &cobra.Command{ return nil }, } - -// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { - if strings.Contains(cmd.CalledAs(), "_") { - fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") - } -} diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 1c68fcffe..97d6197a2 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -83,7 +83,10 @@ func AddNodeFlags(cmd *cobra.Command) { config.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") - // db flags + addDBFlags(cmd) +} + +func addDBFlags(cmd *cobra.Command) { cmd.Flags().String( "db-backend", config.DBBackend, diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go index 7a5814c3b..488f4c322 100644 --- a/cmd/tendermint/commands/show_node_id.go +++ b/cmd/tendermint/commands/show_node_id.go @@ -8,11 +8,9 @@ import ( // ShowNodeIDCmd dumps node's ID to the standard output. var ShowNodeIDCmd = &cobra.Command{ - Use: "show-node-id", - Aliases: []string{"show_node_id"}, - Short: "Show this node's ID", - RunE: showNodeID, - PreRun: deprecateSnakeCase, + Use: "show-node-id", + Short: "Show this node's ID", + RunE: showNodeID, } func showNodeID(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 240ed943f..47b372c61 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -16,11 +16,9 @@ import ( // ShowValidatorCmd adds capabilities for showing the validator info. var ShowValidatorCmd = &cobra.Command{ - Use: "show-validator", - Aliases: []string{"show_validator"}, - Short: "Show this node's validator info", - RunE: showValidator, - PreRun: deprecateSnakeCase, + Use: "show-validator", + Short: "Show this node's validator info", + RunE: showValidator, } func showValidator(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index b40624cc3..c006c297d 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -28,6 +28,8 @@ func main() { cmd.ShowNodeIDCmd, cmd.GenNodeKeyCmd, cmd.VersionCmd, + cmd.InspectCmd, + cmd.MakeKeyMigrateCommand(), debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), ) diff --git a/config/config.go b/config/config.go index 1f33917c5..b6d7ed3ab 100644 --- a/config/config.go +++ b/config/config.go @@ -694,13 +694,14 @@ type P2PConfig struct { //nolint: maligned // Force dial to fail TestDialFail bool `mapstructure:"test-dial-fail"` - // DisableLegacy is used mostly for testing to enable or disable the legacy - // P2P stack. - DisableLegacy bool `mapstructure:"disable-legacy"` + // UseLegacy enables the "legacy" P2P implementation and + // disables the newer default implementation. This flag will + // be removed in a future release. + UseLegacy bool `mapstructure:"use-legacy"` // Makes it possible to configure which queue backend the p2p // layer uses. 
Options are: "fifo", "priority" and "wdrr", - // with the default being "fifo". + // with the default being "priority". QueueType string `mapstructure:"queue-type"` } @@ -732,6 +733,7 @@ func DefaultP2PConfig() *P2PConfig { DialTimeout: 3 * time.Second, TestDialFail: false, QueueType: "priority", + UseLegacy: false, } } diff --git a/config/toml.go b/config/toml.go index 9ba154289..c59f69d97 100644 --- a/config/toml.go +++ b/config/toml.go @@ -271,7 +271,7 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}" [p2p] # Enable the new p2p layer. -disable-legacy = {{ .P2P.DisableLegacy }} +use-legacy = {{ .P2P.UseLegacy }} # Select the p2p internal queue queue-type = "{{ .P2P.QueueType }}" diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index dfe34ae10..c2c0c6017 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -13,7 +13,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" // necessary for Bitcoin address format - "golang.org/x/crypto/ripemd160" // nolint: staticcheck + "golang.org/x/crypto/ripemd160" // nolint ) //------------------------------------- diff --git a/docs/architecture/README.md b/docs/architecture/README.md index a4e326274..f6c12996f 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -97,3 +97,6 @@ Note the context/background should be written in the present tense. - [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) - [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md) - [ADR-057: RPC](./adr-057-RPC.md) +- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) +- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md) +- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) diff --git a/docs/architecture/adr-069-flexible-node-intitalization.md b/docs/architecture/adr-069-flexible-node-intitalization.md new file mode 100644 index 000000000..ec66725be --- /dev/null +++ b/docs/architecture/adr-069-flexible-node-intitalization.md @@ -0,0 +1,273 @@ +# ADR 069: Flexible Node Initialization + +## Changlog + +- 2021-06-09: Initial Draft (@tychoish) + +- 2021-07-21: Major Revision (@tychoish) + +## Status + +Proposed. + +## Context + +In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md), +during the 0.35 development cycle, we have attempted to reduce the the API +surface area by moving most of the interface of the `node` package into +unexported functions, as well as moving the reactors to an `internal` +package. Having this coincide with the 0.35 release made a lot of sense +because these interfaces were _already_ changing as a result of the `p2p` +[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit +more about how tendermint exposes this API. + +While the interfaces of the P2P layer and most of the node package are already +internalized, this precludes some operational patterns that are important to +users who use tendermint as a library. Specifically, introspecting the +tendermint node service and replacing components is not supported in the latest +version of the code, and some of these use cases would require maintaining a +vendor copy of the code. Adding these features requires rather extensive +(internal/implementation) changes to the `node` and `rpc` packages, and this +ADR describes a model for changing the way that tendermint nodes initialize, in +service of providing this kind of functionality. 
+ +We consider node initialization, because the current implementation +provides strong connections between all components, as well as between +the components of the node and the RPC layer, and being able to think +about the interactions of these components will help enable these +features and help define the requirements of the node package. + +## Alternative Approaches + +These alternatives are presented to frame the design space and to +contextualize the decision in terms of product requirements. These +ideas are not inherently bad, and may even be possible or desirable +in the (distant) future, and merely provide additional context for how +we, in the moment, came to our decision(s). + +### Do Nothing + +The current implementation is functional and sufficient for the vast +majority of use cases (e.g., all users of the Cosmos-SDK as well as +anyone who runs tendermint and the ABCI application in separate +processes). In the current implementation, and even previous versions, +modifying node initialization or injecting custom components required +copying most of the `node` package, which required such users +to maintain a vendored copy of tendermint. + +While this is (likely) not tenable in the long term, as users do want +more modularity, and the current service implementation is brittle and +difficult to maintain, in the short term it may be possible to delay +implementation somewhat. Eventually, however, we will need to make the +`node` package easier to maintain and reason about. + +### Generic Service Pluggability + +One possible system design would export interfaces (in the Golang +sense) for all components of the system, to permit runtime dependency +injection of all components in the system, so that users can compose +tendermint nodes of arbitrary user-supplied components. + +Although this level of customization would provide benefits, it would be a huge +undertaking (particularly with regards to API design work) that we do not have +scope for at the moment. Eventually providing support for some kinds of +pluggability may be useful, so the current solution does not explicitly +foreclose the possibility of this alternative. + +### Abstract Dependency Based Startup and Shutdown + +The main proposal in this document makes tendermint node initialization simpler +and more abstract, but the system lacks a number of +features which daemon/service initialization could provide, such as a +system allowing the authors of services to control initialization and shutdown order +of components using dependency relationships. + +Such a system could work by allowing services to declare +initialization order dependencies to other reactors (by ID, perhaps) +so that the node could decide the initialization based on the +dependencies declared by services rather than requiring the node to +encode this logic directly. + +This level of configuration is probably more complicated than is needed. Given +that the authors of components in the current implementation of tendermint +already *do* need to know about other components, a dependency-based system +would probably be overly-abstract at this stage. + +## Decisions + +- To the greatest extent possible, factor the code base so that + packages are responsible for their own initialization, and minimize + the amount of code in the `node` package itself. + +- As a design goal, reduce direct coupling and dependencies between + components in the implementation of `node`.
+
+- Begin iterating on a more flexible internal framework for
+  initializing tendermint nodes to make the initialization process
+  less hard-coded in the implementation of the node objects.
+
+  - Reactors should not need to expose their interfaces *within* the
+    implementation of the node type.
+
+  - This refactoring should be entirely opaque to users.
+
+  - These node initialization changes should not require a
+    reevaluation of the `service.Service` interface or a generic
+    initialization orchestration framework.
+
+- Do not proactively provide a system for injecting
+  components/services within a tendermint node, though make it
+  possible to retrofit this kind of pluggability in the future if
+  needed.
+
+- Prioritize implementation of the p2p-based statesync reactor to
+  obviate the need for users to inject a custom state-sync provider.
+
+## Detailed Design
+
+The [current
+nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
+includes direct references to the implementations of each of the
+reactors, which should be replaced by references to `service.Service`
+objects. This will require moving construction of the [rpc
+service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
+into the constructor of
+[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One
+possible implementation of this would be to eliminate the current
+`ConfigureRPC` method on the node package and instead [configure it
+here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441).
+
+To avoid adding complexity to the `node` package, we will add a
+composite service implementation to the `service` package
+that implements `service.Service` and is composed of a sequence of
+underlying `service.Service` objects, handling their startup and
+shutdown in the specified sequential order.
+
+Consensus, blocksync (*née* fast sync), and statesync all depend on
+each other, and have significant initialization dependencies that are
+presently encoded in the `node` package. As part of this change, a
+new package/component (likely named `blocks` and located at
+`internal/blocks`) will encapsulate the initialization of these block
+management areas of the code.
+
+### Injectable Component Option
+
+This section briefly describes a possible implementation for
+user-supplied services running within a node. This should not be
+implemented unless user-supplied components are a hard requirement for
+a user.
+
+In order to allow components to be replaced, a new public function
+will be added to the public interface of `node` with a signature that
+resembles the following:
+
+```go
+func NewWithServices(conf *config.Config,
+	logger log.Logger,
+	cf proxy.ClientCreator,
+	gen *types.GenesisDoc,
+	srvs []service.Service,
+) (service.Service, error) {
+```
+
+The `service.Service` objects will be initialized in the order supplied, after
+all pre-configured/default services have started (and shut down in reverse
+order). The given services may implement additional interfaces, allowing them
+to replace specific default services. `NewWithServices` will validate input
+service lists with the following rules:
+
+- None of the services may already be running.
+- The caller may not supply more than one replacement reactor for a given
+  default service type.
+
+If callers violate any of these rules, `NewWithServices` will return
+an error.
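+
+As a rough illustration (not a committed design), this validation could look
+something like the sketch below; `validateServices` is a hypothetical helper,
+the matching of supplied services to default service types is elided, and the
+`fmt`, `reflect`, and `service` imports are assumed:
+
+```go
+// validateServices sketches the rules described above: no supplied service
+// may already be running, and at most one replacement may be supplied for
+// any given service type.
+func validateServices(srvs []service.Service) error {
+	seen := make(map[reflect.Type]bool)
+	for _, s := range srvs {
+		if s.IsRunning() {
+			return fmt.Errorf("service %s is already running", s)
+		}
+		t := reflect.TypeOf(s)
+		if seen[t] {
+			return fmt.Errorf("multiple services supplied for type %v", t)
+		}
+		seen[t] = true
+	}
+	return nil
+}
+```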
+To retract support for this kind of operation in the future,
+the function can be modified to *always* return an error.
+
+## Consequences
+
+### Positive
+
+- The node package will become easier to maintain.
+
+- It will become easier to add additional services within tendermint
+  nodes.
+
+- It will become possible to replace default components in the node
+  package without vendoring the tendermint repo and modifying internal
+  code.
+
+- The current end-to-end (e2e) test suite will be able to prevent any
+  regressions, and the new functionality can be thoroughly unit tested.
+
+- The scope of this project is very narrow, which minimizes risk.
+
+### Negative
+
+- This increases our reliance on the `service.Service` interface, which
+  is probably not an interface that we want to fully commit to.
+
+- This proposal implements a fairly minimal set of functionality and
+  leaves open the possibility for many additional features which are
+  not included in the scope of this proposal.
+
+### Neutral
+
+N/A
+
+## Open Questions
+
+- To what extent does this new initialization framework need to accommodate
+  the legacy p2p stack? Would it be possible to delay a great deal of this
+  work to the 0.36 cycle to avoid this complexity?
+
+  - Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35.
+
+- Where should additional public types be exported for the 0.35
+  release?
+
+  Related to the general project of API stabilization, we want to deprecate
+  the `types` package, and move its contents into a new `pkg` hierarchy;
+  however, the design of the `pkg` interface is currently underspecified.
+  If `types` is going to remain for the 0.35 release, then we should consider
+  the impact of using multiple organizing modalities for this code within a
+  single release.
+
+## Future Work
+
+- Improve or simplify the `service.Service` interface. There are some
+  pretty clear limitations with this interface as written (there's no
+  way to time out a slow startup or shutdown, the cycle between the
+  `service.BaseService` and `service.Service` implementations is
+  troubling, and the default panic in `OnReset` is questionable.)
+
+- As part of the refactor of `service.Service`, have all services/nodes
+  respect the lifetime of a `context.Context` object, and avoid the
+  current practice of creating `context.Context` objects in p2p and
+  reactor code. This would be required for in-process multi-tenancy.
+
+- Support explicit dependencies between components and allow for
+  parallel startup, so that different reactors can start up at the same
+  time, where possible.
+
+## References
+
+- [this branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
+  contains experimental work in the implementation of the node package
+  to unwind some of the hard dependencies between components.
+
+- [the component graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
+  as a framing for internal service construction.
+
+## Appendix
+
+### Dependencies
+
+The relationship between the blockchain and consensus reactors,
+described by the following dependency graph, makes replacing some of
+these components more difficult relative to other reactors or
+components.
+ +![consensus blockchain dependency graph](./img/consensus_blockchain.png) diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md new file mode 100644 index 000000000..c23488005 --- /dev/null +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -0,0 +1,445 @@ +# ADR 71: Proposer-Based Timestamps + +* [Changelog](#changelog) +* [Status](#status) +* [Context](#context) +* [Alternative Approaches](#alternative-approaches) + * [Remove timestamps altogether](#remove-timestamps-altogether) +* [Decision](#decision) +* [Detailed Design](#detailed-design) + * [Overview](#overview) + * [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp) + * [Saving the timestamp across heights](#saving-the-timestamp-across-heights) + * [Changes to `CommitSig`](#changes-to-commitsig) + * [Changes to `Commit`](#changes-to-commit) + * [Changes to `Vote` messages](#changes-to-vote-messages) + * [New consensus parameters](#new-consensus-parameters) + * [Changes to `Header`](#changes-to-header) + * [Changes to the block proposal step](#changes-to-the-block-proposal-step) + * [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp) + * [Proposer selects block timestamp](#proposer-selects-block-timestamp) + * [Proposer waits](#proposer-waits) + * [Changes to the propose step timeout](#changes-to-the-propose-step-timeout) + * [Changes to validation rules](#changes-to-validation-rules) + * [Proposal timestamp validation](#proposal-timestamp-validation) + * [Block timestamp validation](#block-timestamp-validation) + * [Changes to the prevote step](#changes-to-the-prevote-step) + * [Changes to the precommit step](#changes-to-the-precommit-step) + * [Changes to locking a block](#changes-to-locking-a-block) + * [Remove voteTime Completely](#remove-votetime-completely) +* [Future Improvements](#future-improvements) +* [Consequences](#consequences) + * [Positive](#positive) + * [Neutral](#neutral) + * [Negative](#negative) +* [References](#references) + +## Changelog + + - July 15 2021: Created by @williambanfield + - Aug 4 2021: Draft completed by @williambanfield + - Aug 5 2021: Draft updated to include data structure changes by @williambanfield + - Aug 20 2021: Language edits completed by @williambanfield + +## Status + + **Accepted** + +## Context + +Tendermint currently provides a monotonically increasing source of time known as [BFTTime](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md). +This mechanism for producing a source of time is reasonably simple. +Each correct validator adds a timestamp to each `Precommit` message it sends. +The timestamp it sends is either the validator's current known Unix time or one millisecond greater than the previous block time, depending on which value is greater. +When a block is produced, the proposer chooses the block timestamp as the weighted median of the times in all of the `Precommit` messages the proposer received. +The weighting is proportional to the amount of voting power, or stake, a validator has on the network. +This mechanism for producing timestamps is both deterministic and byzantine fault tolerant. + +This current mechanism for producing timestamps has a few drawbacks. +Validators do not have to agree at all on how close the selected block timestamp is to their own currently known Unix time. +Additionally, any amount of voting power `>1/3` may directly control the block timestamp. 
+As a result, it is quite possible that the timestamp is not particularly meaningful. + +These drawbacks present issues in the Tendermint protocol. +Timestamps are used by light clients to verify blocks. +Light clients rely on correspondence between their own currently known Unix time and the block timestamp to verify blocks they see; +However, their currently known Unix time may be greatly divergent from the block timestamp as a result of the limitations of `BFTTime`. + +The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues. +Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: + +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block. +1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time. + +The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power. +This document outlines the necessary code changes in Tendermint to implement the corresponding [proposer-based timestamps specification](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp). + +## Alternative Approaches + +### Remove timestamps altogether + +Computer clocks are bound to skew for a variety of reasons. +Using timestamps in our protocol means either accepting the timestamps as not reliable or impacting the protocol’s liveness guarantees. +This design requires impacting the protocol’s liveness in order to make the timestamps more reliable. +An alternate approach is to remove timestamps altogether from the block protocol. +`BFTTime` is deterministic but may be arbitrarily inaccurate. +However, having a reliable source of time is quite useful for applications and protocols built on top of a blockchain. + +We therefore decided not to remove the timestamp. +Applications often wish for some transactions to occur on a certain day, on a regular period, or after some time following a different event. +All of these require some meaningful representation of agreed upon time. +The following protocols and application features require a reliable source of time: +* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. +* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification). +* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime). +* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements). + +Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate. +This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear). 
+Proposer-based timestamps will allow this inflation calculation to use a more meaningful and accurate source of time. + + +## Decision + +Implement proposer-based timestamps and remove `BFTTime`. + +## Detailed Design + +### Overview + +Implementing proposer-based timestamps will require a few changes to Tendermint’s code. +These changes will be to the following components: +* The `internal/consensus/` package. +* The `state/` package. +* The `Vote`, `CommitSig`, `Commit` and `Header` types. +* The consensus parameters. + +### Proposal Timestamp and Block Timestamp + +This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message. +The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses. + +The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31). +This timestamp is the current Unix time known to the proposer when sending the `Proposal` message. +This timestamp is not currently used as part of consensus. +The changes in this ADR will begin using the proposal message timestamp as part of consensus. +We will refer to this as the **proposal timestamp** throughout this design. + +The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338). +This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm. +It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block. +This field will continue to be used but the logic for creating and validating this timestamp will change. +We will refer to this as the **block timestamp** throughout this design. + +At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`. +The following image shows this relationship. +The rest of this document describes the code changes that will make this possible. + +![](./img/pbts-message.png) + +### Saving the timestamp across heights + +Currently, `BFTtime` uses `LastCommit` to construct the block timestamp. +The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`. +`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`. + +For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`. +Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`. +To enable this, we will add a `Timestamp` field to the `Commit` struct. +This field will be populated at each height with the proposal timestamp decided on at the previous height. +This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes. +Changes to the `CommitSig` and `Commit` struct are detailed below. + +### Changes to `CommitSig` + +The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp. +This timestamp is the current Unix time known to the validator when it issued a `Precommit` for the block. 
+This timestamp is no longer used and will be removed in this change. + +`CommitSig` will be updated as follows: + +```diff +type CommitSig struct { + BlockIDFlag BlockIDFlag `json:"block_id_flag"` + ValidatorAddress Address `json:"validator_address"` +-- Timestamp time.Time `json:"timestamp"` + Signature []byte `json:"signature"` +} +``` + +### Changes to `Commit` + +The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp. +The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp. +With these timestamps removed, the commit time will instead be stored in the `Commit` struct. + +`Commit` will be updated as follows. + +```diff +type Commit struct { + Height int64 `json:"height"` + Round int32 `json:"round"` +++ Timestamp time.Time `json:"timestamp"` + BlockID BlockID `json:"block_id"` + Signatures []CommitSig `json:"signatures"` +} +``` + +### Changes to `Vote` messages + +`Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50). +This struct currently contains a timestamp. +This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator. +For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field. +For prevotes, this field is unused. +Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`. +This timestamp is therefore no longer useful and will be dropped. + +`Vote` will be updated as follows: + +```diff +type Vote struct { + Type tmproto.SignedMsgType `json:"type"` + Height int64 `json:"height"` + Round int32 `json:"round"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. +-- Timestamp time.Time `json:"timestamp"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int32 `json:"validator_index"` + Signature []byte `json:"signature"` +} +``` + +### New consensus parameters + +The proposer-based timestamp specification includes multiple new parameters that must be the same among all validators. +These parameters are `PRECISION`, `MSGDELAY`, and `ACCURACY`. + +The `PRECISION` and `MSGDELAY` parameters are used to determine if the proposed timestamp is acceptable. +A validator will only Prevote a proposal if the proposal timestamp is considered `timely`. +A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator. +More specifically, a proposal timestamp is `timely` if `validatorLocalTime - PRECISION < proposalTime < validatorLocalTime + PRECISION + MSGDELAY`. 
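+
+Expressed in code, this check could look roughly like the following sketch; `isTimely` is a hypothetical helper shown only to illustrate the bound above, with the `time` package assumed to be imported:
+
+```go
+// isTimely reports whether a proposal timestamp falls inside the window
+// (localTime - PRECISION, localTime + PRECISION + MSGDELAY).
+func isTimely(proposalTime, localTime time.Time, precision, msgDelay time.Duration) bool {
+	lowerBound := localTime.Add(-precision)
+	upperBound := localTime.Add(precision + msgDelay)
+	return proposalTime.After(lowerBound) && proposalTime.Before(upperBound)
+}
+```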
+
+Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
+
+The proposer-based timestamp specification also includes a [new ACCURACY parameter](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md#pbts-clocksync-external0).
+Intuitively, `ACCURACY` represents the difference between the ‘real’ time and the currently known time of correct validators.
+The currently known Unix time of any validator is always somewhat different from real time.
+`ACCURACY` is the largest such difference between each validator's time and real time taken as an absolute value.
+This is not something a computer can determine on its own and must be specified as an estimate by the community running a Tendermint-based chain.
+It is used in the new algorithm to [calculate a timeout for the propose step](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md#pbts-alg-startround0).
+`ACCURACY` is assumed to be the same across all validators and therefore should be included as a consensus parameter.
+
+The consensus parameters will be updated to include this new `Timestamp` field as follows:
+
+```diff
+type ConsensusParams struct {
+ Block     BlockParams     `json:"block"`
+ Evidence  EvidenceParams  `json:"evidence"`
+ Validator ValidatorParams `json:"validator"`
+ Version   VersionParams   `json:"version"`
+++ Timestamp TimestampParams `json:"timestamp"`
+}
+```
+
+```go
+type TimestampParams struct {
+	Accuracy  time.Duration `json:"accuracy"`
+	Precision time.Duration `json:"precision"`
+	MsgDelay  time.Duration `json:"msg_delay"`
+}
+```
+
+### Changes to `Header`
+
+The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp.
+This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps.
+This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly in the block's `LastCommit`.
+This timestamp will therefore be identical in both the `Header` and the `LastCommit`.
+To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`.
+ +`Header` will be updated as follows: + +```diff +type Header struct { + // basic block info + Version version.Consensus `json:"version"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` +-- Time time.Time `json:"time"` +++ LastTimestamp time.Time `json:"last_timestamp"` + + // prev block info + LastBlockID BlockID `json:"last_block_id"` + + // hashes of block data + LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"` + DataHash tmbytes.HexBytes `json:"data_hash"` + + // hashes from the app output from the prev block + ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` + NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` + ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` + AppHash tmbytes.HexBytes `json:"app_hash"` + + // root hash of all results from the txs from the previous block + LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` + + // consensus info + EvidenceHash tmbytes.HexBytes `json:"evidence_hash"` + ProposerAddress Address `json:"proposer_address"` +} +``` + +### Changes to the block proposal step + +#### Proposer selects proposal timestamp + +The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message. +This satisfies the proposer-based timestamp specification and does not need to change. + +#### Proposer selects block timestamp + +The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's in the `RoundState.LastCommit` field. +The proposer will select this timestamp to use as the block timestamp at height `H`. + +#### Proposer waits + +Block timestamps must be monotonically increasing. +In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millisecond to the previous block’s time and used that in its vote messages](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2246). +A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator time no longer works. + +Validator clocks will not be perfectly in sync. +Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`. +If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`. + +This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method. +This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`. + +#### Changes to the propose step timeout + +Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen. +Proposer-based timestamps requires changing this timeout logic. + +The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block. +The validators must now take this and some other factors into account when deciding when to timeout the propose step. +Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer. 
+Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
+
+Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out.
+To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, a validator waiting for a proposal will wait until `LastCommit.Timestamp + 2*ACCURACY + MSGDELAY`.
+The spec defines this as `waitingTime`.
+
+The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
+`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
+The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
+
+### Changes to validation rules
+
+The proposal validation rules will need slight modification to implement proposer-based timestamps.
+Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely`, and we will modify the way the block timestamp is validated as well.
+
+#### Proposal timestamp validation
+
+Adding proposal timestamp validation is a reasonably straightforward change.
+The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
+Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
+The precommit and prevote validation logic does not currently use this timestamp.
+This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validator.
+If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered valid.
+The validator will also check that the proposal time is greater than the block timestamp from the previous height.
+
+If no valid proposal is received by the proposal timeout, the validator will prevote nil.
+This is identical to the current logic.
+
+#### Block timestamp validation
+
+The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
+First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
+Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
+Finally, the logic also authenticates the timestamps in the `LastCommit`.
+The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key.
+One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`.
+To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
+This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25). + +The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change. + +`BFTTime` validation is no longer applicable and will be removed. +Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps. +This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117). +The `MedianTime` function can be completely removed. +The `LastCommit` timestamps may also be removed. + +The `signedBytes` validation logic in `VerifyCommit` will be slightly altered. +The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp. +The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`. +The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp. +Specifically, the `VerifyCommit` function will be updated to use this new timestamp. + +### Changes to the prevote step + +Currently, a validator will prevote a proposal in one of three cases: + +* Case 1: Validator has no locked block and receives a valid proposal. +* Case 2: Validator has a locked block and receives a valid proposal matching its locked block. +* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the new proposal’s block. + +The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above. + +### Changes to the precommit step + +The precommit step will not require much modification. +Its proposal validation rules will change in the same ways that validation will change in the prevote step. + +### Changes to locking a block +When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field. +In each subsequent round it will prevote that block. +A validator will only change which block it has locked if it sees +2/3 prevotes for a different block. + +This mechanism will remain largely unchanged. +The only difference is the addition of proposal timestamp validation. +A validator will prevote nil in a round if the proposal message it received is not `timely`. +Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block. +This difference is an incidental result of the changes to prevote validation. +It is included in this design for completeness and to clarify that no additional changes will be made to block locking. + +### Remove voteTime Completely + +[voteTime](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L2229) is a mechanism for calculating the next `BFTTime` given both the validator's current known Unix time and the previous block timestamp. +If the previous block timestamp is greater than the validator's current known Unix time, then voteTime returns a value one millisecond greater than the previous block timestamp. 
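+
+That behavior can be sketched roughly as follows (an illustrative approximation based on the description above, not the exact implementation; the `time` package is assumed to be imported):
+
+```go
+// voteTimeSketch approximates the BFTTime behavior described above: a vote
+// carries the validator's current Unix time, unless the previous block time
+// plus one millisecond is later, in which case that later value is used so
+// that timestamps remain monotonically increasing.
+func voteTimeSketch(now, lastBlockTime time.Time) time.Time {
+	minVoteTime := lastBlockTime.Add(time.Millisecond)
+	if now.After(minVoteTime) {
+		return now
+	}
+	return minVoteTime
+}
+```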
+This logic is used in multiple places and is no longer needed for proposer-based timestamps. +It should therefore be removed completely. + +## Future Improvements + +* Implement BLS signature aggregation. +By removing fields from the `Precommit` messages, we are able to aggregate signatures. + +## Consequences + +### Positive + +* `<2/3` of validators can no longer influence block timestamps. +* Block timestamp will have stronger correspondence to real time. +* Improves the reliability of light client block verification. +* Enables BLS signature aggregation. +* Enables evidence handling to use time instead of height for evidence validity. + +### Neutral + +* Alters Tendermint’s liveness properties. +Liveness now requires that all correct validators have synchronized clocks within a bound. +Liveness will now also require that validators’ clocks move forward, which was not required under `BFTTime`. + +### Negative + +* May increase the length of the propose step if there is a large skew between the previous proposer and the current proposer’s local Unix time. +This skew will be bound by the `PRECISION` value, so it is unlikely to be too large. + +* Current chains with block timestamps far in the future will either need to pause consensus until after the erroneous block timestamp or must maintain synchronized but very inaccurate clocks. + +## References + +* [PBTS Spec](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp) +* [BFTTime spec](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md) diff --git a/docs/architecture/adr-072-request-for-comments.md b/docs/architecture/adr-072-request-for-comments.md new file mode 100644 index 000000000..7eb22ebc9 --- /dev/null +++ b/docs/architecture/adr-072-request-for-comments.md @@ -0,0 +1,105 @@ +# ADR 72: Restore Requests for Comments + +## Changelog + +- 20-Aug-2021: Initial draft (@creachadair) + +## Status + +Proposed + +## Context + +In the past, we kept a collection of Request for Comments (RFC) documents in `docs/rfc`. +Prior to the creation of the ADR process, these documents were used to document +design and implementation decisions about Tendermint Core. The RFC directory +was removed in favor of ADRs, in commit 3761aa69 (PR +[\#6345](https://github.com/tendermint/tendermint/pull/6345)). + +For issues where an explicit design decision or implementation change is +required, an ADR is generally preferable to an open-ended RFC: An ADR is +relatively narrowly-focused, identifies a specific design or implementation +question, and documents the consensus answer to that question. + +Some discussions are more open-ended, however, or don't require a specific +decision to be made (yet). Such conversations are still valuable to document, +and several members of the Tendermint team have been doing so by writing gists +or Google docs to share them around. That works well enough in the moment, but +gists do not support any kind of collaborative editing, and both gists and docs +are hard to discover after the fact. Google docs have much better collaborative +editing, but are worse for discoverability, especially when contributors span +different Google accounts. + +Discoverability is important, because these kinds of open-ended discussions are +useful to people who come later -- either as new team members or as outside +contributors seeking to use and understand the thoughts behind our designs and +the architectural decisions that arose from those discussion. 
+ +With these in mind, I propose that: + +- We re-create a new, initially empty `docs/rfc` directory in the repository, + and use it to capture these kinds of open-ended discussions in supplement to + ADRs. + +- Unlike in the previous RFC scheme, documents in this new directory will + _not_ be used directly for decision-making. This is the key difference + between an RFC and an ADR. + + Instead, an RFC will exist to document background, articulate general + principles, and serve as a historical record of discussion and motivation. + + In this system, an RFC may _only_ result in a decision indirectly, via ADR + documents created in response to the RFC. + + **In short:** If a decision is required, write an ADR; otherwise if a + sufficiently broad discussion is needed, write an RFC. + +Just so that there is a consistent format, I also propose that: + +- RFC files are named `rfc-XXX-title.{md,rst,txt}` and are written in plain + text, Markdown, or ReStructured Text. + +- Like an ADR, an RFC should include a high-level change log at the top of the + document, and sections for: + + * Abstract: A brief, high-level synopsis of the topic. + * Background: Any background necessary to understand the topic. + * Discussion: Detailed discussion of the issue being considered. + +- Unlike an ADR, an RFC does _not_ include sections for Decisions, Detailed + Design, or evaluation of proposed solutions. If an RFC leads to a proposal + for an actual architectural change, that must be recorded in an ADR in the + usual way, and may refer back to the RFC in its References section. + +## Alternative Approaches + +Leaving aside implementation details, the main alternative to this proposal is +to leave things as they are now, with ADRs as the only log of record and other +discussions being held informally in whatever medium is convenient at the time. + +## Decision + +(pending) + +## Detailed Design + +- Create a new `docs/rfc` directory in the `tendermint` repository. Note that + this proposal intentionally does _not_ pull back the previous contents of + that path from Git history, as those documents were appropriately merged into + the ADR process. + +- Create a `README.md` for RFCs that explains the rules and their relationship + to ADRs. + +- Create an `rfc-template.md` file for RFC files. + +## Consequences + +### Positive + +- We will have a more discoverable place to record open-ended discussions that + do not immediately result in a design change. + +### Negative + +- Potentially some people could be confused about the RFC/ADR distinction. diff --git a/docs/architecture/img/consensus_blockchain.png b/docs/architecture/img/consensus_blockchain.png new file mode 100644 index 000000000..dd0f4daa8 Binary files /dev/null and b/docs/architecture/img/consensus_blockchain.png differ diff --git a/docs/architecture/img/pbts-message.png b/docs/architecture/img/pbts-message.png new file mode 100644 index 000000000..400f35690 Binary files /dev/null and b/docs/architecture/img/pbts-message.png differ diff --git a/docs/rfc/README.md b/docs/rfc/README.md new file mode 100644 index 000000000..c05853aca --- /dev/null +++ b/docs/rfc/README.md @@ -0,0 +1,40 @@ +--- +order: 1 +parent: + order: false +--- + +# Requests for Comments + +A Request for Comments (RFC) is a record of discussion on an open-ended topic +related to the design and implementation of Tendermint Core, for which no +immediate decision is required. 
+ +The purpose of an RFC is to serve as a historical record of a high-level +discussion that might otherwise only be recorded in an ad hoc way (for example, +via gists or Google docs) that are difficult to discover for someone after the +fact. An RFC _may_ give rise to more specific architectural _decisions_ for +Tendermint, but those decisions must be recorded separately in [Architecture +Decision Records (ADR)](./../architecture). + +As a rule of thumb, if you can articulate a specific question that needs to be +answered, write an ADR. If you need to explore the topic and get input from +others to know what questions need to be answered, an RFC may be appropriate. + +## RFC Content + +An RFC should provide: + +- A **changelog**, documenting when and how the RFC has changed. +- An **abstract**, briefly summarizing the topic so the reader can quickly tell + whether it is relevant to their interest. +- Any **background** a reader will need to understand and participate in the + substance of the discussion (links to other documents are fine here). +- The **discussion**, the primary content of the document. + +The [rfc-template.md](./rfc-template.md) file includes placeholders for these +sections. + +## Table of Contents + + diff --git a/docs/rfc/rfc-template.md b/docs/rfc/rfc-template.md new file mode 100644 index 000000000..b3f404775 --- /dev/null +++ b/docs/rfc/rfc-template.md @@ -0,0 +1,35 @@ +# RFC {RFC-NUMBER}: {TITLE} + +## Changelog + +- {date}: {changelog} + +## Abstract + +> A brief high-level synopsis of the topic of discussion for this RFC, ideally +> just a few sentences. This should help the reader quickly decide whether the +> rest of the discussion is relevant to their interest. + +## Background + +> Any context or orientation needed for a reader to understand and participate +> in the substance of the Discussion. If necessary, this section may include +> links to other documentation or sources rather than restating existing +> material, but should provide enough detail that the reader can tell what they +> need to read to be up-to-date. + +### References + +> Links to external materials needed to follow the discussion may be added here. +> +> In addition, if the discussion in a request for comments leads to any design +> decisions, it may be helpful to add links to the ADR documents here after the +> discussion has settled. + +## Discussion + +> This section contains the core of the discussion. +> +> There is no fixed format for this section, but ideally changes to this +> section should be updated before merging to reflect any discussion that took +> place on the PR that made those changes. diff --git a/docs/tools/debugging/README.md b/docs/tools/debugging/README.md index 2932f6e86..053b43624 100644 --- a/docs/tools/debugging/README.md +++ b/docs/tools/debugging/README.md @@ -62,3 +62,30 @@ given destination directory. Each archive will contain: Note: goroutine.out and heap.out will only be written if a profile address is provided and is operational. This command is blocking and will log any error. + +## Tendermint Inspect + +Tendermint includes an `inspect` command for querying Tendermint's state store and block +store over Tendermint RPC. + +When the Tendermint consensus engine detects inconsistent state, it will crash the +entire Tendermint process. +While in this inconsistent state, a node running Tendermint's consensus engine will not start up. +The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store +and state store. 
+`inspect` allows operators to query a read-only view of the state.
+`inspect` does not run the consensus engine at all and can therefore be used to debug
+processes that have crashed due to inconsistent state.
+
+To start the `inspect` process, run
+```bash
+tendermint inspect
+```
+
+### RPC endpoints
+The list of available RPC endpoints can be found by making a request to the RPC port.
+For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
+`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
+
+Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
diff --git a/docs/tools/debugging/pro.md b/docs/tools/debugging/pro.md
index 3342deb49..b43ed5cba 100644
--- a/docs/tools/debugging/pro.md
+++ b/docs/tools/debugging/pro.md
@@ -64,13 +64,42 @@ It won’t kill the node, but it will gather all of the above data and package i
 
 At this point, depending on how severe the degradation is, you may want to restart the process.
 
+## Tendermint Inspect
+
+What if the Tendermint node will not start up due to inconsistent consensus state?
+
+When a node running the Tendermint consensus engine detects an inconsistent state,
+it will crash the entire Tendermint process.
+The Tendermint consensus engine cannot be run in this inconsistent state, and so the node
+will fail to start up as a result.
+The Tendermint RPC server can provide valuable information for debugging in this situation.
+The Tendermint `inspect` command will run a subset of the Tendermint RPC server
+that is useful for debugging inconsistent state.
+
+### Running inspect
+
+Start up the `inspect` tool on the machine where Tendermint crashed using:
+```bash
+tendermint inspect --home=
+```
+
+`inspect` will use the data directory specified in your Tendermint configuration file.
+`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.
+
+### Using inspect
+
+With the `inspect` server running, you can access RPC endpoints that are critically important
+for debugging.
+Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoints
+will return useful information about the Tendermint consensus state.
+
 ## Outro
 
-We’re hoping that the `tendermint debug` subcommand will become de facto the first response to any accidents.
+We’re hoping that these Tendermint tools will become the de facto first response for any accidents.
 
-Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` yet?
+Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
 
-Join our chat, where we discuss the current issues and future improvements.
+Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements.
— diff --git a/go.mod b/go.mod index b6e0fea38..c7723b096 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/tendermint/tendermint go 1.16 require ( - github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml v0.4.1 github.com/Masterminds/squirrel v1.5.0 github.com/Workiva/go-datastructures v1.0.53 github.com/adlio/schema v1.1.13 @@ -13,7 +13,7 @@ require ( github.com/go-kit/kit v0.11.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.41.1 + github.com/golangci/golangci-lint v1.42.0 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 @@ -35,8 +35,10 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 github.com/vektra/mockery/v2 v2.9.0 - golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b - golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 - google.golang.org/grpc v1.39.0 + golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + google.golang.org/grpc v1.40.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 55e08e088..fff11dad3 100644 --- a/go.sum +++ b/go.sum @@ -44,10 +44,13 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Antonboom/errname v0.1.3 h1:qKV8gSzPzBqrG/q0dgraZXJCymWt6KuD9+Y7K7xtzN8= +github.com/Antonboom/errname v0.1.3/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -72,7 +75,7 @@ github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQ github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod 
h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= @@ -175,8 +178,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.8 h1:1mrIGMBQsBu0P7j7m1M8Lb+ZeZxsZL+jyGX4YoMJJpg= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= +github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294= +github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -256,7 +259,7 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= @@ -288,8 +291,8 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -340,8 +343,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.41.1 h1:KH28pTSqRu6DTXIAANl1sPXNCmqg4VEH21z6G9Wj4SM= -github.com/golangci/golangci-lint v1.41.1/go.mod 
h1:LPtcY3aAAU8wydHrKpnanx9Og8K/cblZSyGmI5CJZUk= +github.com/golangci/golangci-lint v1.42.0 h1:hqf1zo6zY3GKGjjBk3ttdH22tGwF6ZRpk6j6xyJmE8I= +github.com/golangci/golangci-lint v1.42.0/go.mod h1:wgkGQnU9lOUFvTFo5QBSOvaSSddEV21Z1zYkJSbppZA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -393,6 +396,7 @@ github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4Mgqvf github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -548,8 +552,8 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/ldez/gomoddirectives v0.2.1 h1:9pAcW9KRZW7HQjFwbozNvFMcNVwdCBufU7os5QUwLIY= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= +github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk= github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= @@ -597,8 +601,8 @@ github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwg github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.7 h1:5kEWTY/W5a0Eiqnkn2BAWsRZpxbs1ft15PsyNC7Rml8= -github.com/mgechev/revive v1.0.7/go.mod h1:vuE5ox/4L/HDd63MCcCk3H6wTLQ6XXezRphJ8cJJOxY= +github.com/mgechev/revive v1.1.0 h1:TvabpsolbtlzZTyJcgMRN38MHrgi8C0DhmGE5dhscGY= +github.com/mgechev/revive v1.1.0/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= @@ -631,7 +635,7 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= 
github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= @@ -649,8 +653,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhXzFfl8= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= +github.com/nishanths/exhaustive v0.2.3 h1:+ANTMqRNrqwInnP9aszg/0jDo+zbXa4x66U19Bx/oTk= +github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= @@ -670,14 +674,15 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= -github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= -github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= 
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -768,8 +773,8 @@ github.com/rs/zerolog v1.23.0 h1:UskrK+saS9P9Y789yNNulYKdARjPZuS35B8gJF2x60g= github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.2 h1:ZJQeYHZ2kaJpojoQBaGqpsn5g7GMcePY36uUGW1umbs= -github.com/ryancurrah/gomodguard v1.2.2/go.mod h1:tpI+C/nzvfUR3bF28b5QHpTn/jM/zlGniI++6ZlIWeE= +github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -778,12 +783,12 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dms github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.8.0 h1:iHg9cVmHWf5n6/ijUJ4F10h5bKlNtvXmcWzRw0lxiKE= -github.com/securego/gosec/v2 v2.8.0/go.mod h1:hJZ6NT5TqoY+jmOsaxAV4cXoEdrMRLVaNPnSpUCvCZs= +github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8= +github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.5/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= +github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -818,7 +823,6 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -861,18 +865,18 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ= github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= -github.com/tetafro/godot v1.4.7 h1:zBaoSY4JRVVz33y/qnODsdaKj2yAaMr91HCbqHCifVc= -github.com/tetafro/godot v1.4.7/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tetafro/godot v1.4.8 h1:rhuUH+tBrx24yVAr6Ox3/UxcsiUPPJcGhinfLdbdew0= +github.com/tetafro/godot v1.4.8/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= +github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4= +github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0 h1:LTzwrYlgBUwi9JldazhbJN84fN9nS2UNGrZIo2syqxE= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= +github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= +github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= @@ -887,8 +891,8 @@ github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFO github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= @@ -957,8 +961,9 @@ 
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b h1:wSOdpTq0/eI46Ez/LkDwIsAKA71YP2SRKBODiRWM0as= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1040,13 +1045,13 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1070,6 +1075,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1094,6 +1100,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1133,16 +1140,17 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1152,8 +1160,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 
h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1239,7 +1248,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1256,8 +1264,10 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1367,8 +1377,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1427,8 +1437,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE= -honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= @@ -1437,6 +1447,8 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphD mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6tvqeIdtsVSsxmNPqlFU= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/inspect/doc.go b/inspect/doc.go new file mode 100644 index 000000000..c53049e1a --- /dev/null +++ b/inspect/doc.go @@ -0,0 +1,36 @@ +/* +Package inspect provides a tool for investigating the state of a +failed Tendermint node. + +This package provides the Inspector type. The Inspector type runs a subset of the Tendermint +RPC endpoints that are useful for debugging issues with Tendermint consensus. + +When a node running the Tendermint consensus engine detects an inconsistent consensus state, +the entire node will crash. The Tendermint consensus engine cannot run in this +inconsistent state so the node will not be able to start up again. + +The RPC endpoints provided by the Inspector type allow for a node operator to inspect +the block store and state store to better understand what may have caused the inconsistent state. + + +The Inspector type's lifecycle is controlled by a context.Context + ins := inspect.NewFromConfig(rpcConfig) + ctx, cancelFunc:= context.WithCancel(context.Background()) + + // Run blocks until the Inspector server is shut down. + go ins.Run(ctx) + ... + + // calling the cancel function will stop the running inspect server + cancelFunc() + +Inspector serves its RPC endpoints on the address configured in the RPC configuration + + rpcConfig.ListenAddress = "tcp://127.0.0.1:26657" + ins := inspect.NewFromConfig(rpcConfig) + go ins.Run(ctx) + +The list of available RPC endpoints can then be viewed by navigating to +http://127.0.0.1:26657/ in the web browser. 
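+
+The endpoints can also be queried programmatically. A minimal sketch, assuming the
+listen address configured above and the rpc/client/http package (imported here as
+httpclient, as in this package's tests):
+
+	cli, err := httpclient.New("tcp://127.0.0.1:26657")
+	if err != nil {
+		// handle the error
+	}
+	// A nil height requests the latest height known to the block store.
+	block, err := cli.Block(context.Background(), nil)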
+*/
+package inspect
diff --git a/inspect/inspect.go b/inspect/inspect.go
new file mode 100644
index 000000000..38bc9ed5d
--- /dev/null
+++ b/inspect/inspect.go
@@ -0,0 +1,149 @@
+package inspect
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/inspect/rpc"
+	"github.com/tendermint/tendermint/libs/log"
+	tmstrings "github.com/tendermint/tendermint/libs/strings"
+	rpccore "github.com/tendermint/tendermint/rpc/core"
+	"github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/state/indexer"
+	"github.com/tendermint/tendermint/state/indexer/sink"
+	"github.com/tendermint/tendermint/store"
+	"github.com/tendermint/tendermint/types"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// Inspector manages an RPC service that exports methods to debug a failed node.
+// After a node shuts down due to a consensus failure, it will no longer start
+// up, and its state cannot easily be inspected. An Inspector value provides a similar interface
+// to the node, using the underlying Tendermint data stores, without bringing up
+// any other components. A caller can query the Inspector service to inspect the
+// persisted state and debug the failure.
+type Inspector struct {
+	routes rpccore.RoutesMap
+
+	config *config.RPCConfig
+
+	indexerService *indexer.Service
+	eventBus       *types.EventBus
+	logger         log.Logger
+}
+
+// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
+// The Inspector type does not modify the state or block stores.
+// The sinks are used to enable block and transaction querying via the RPC server.
+// The caller is responsible for starting and stopping the Inspector service.
+//nolint:lll
+func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector {
+	routes := rpc.Routes(*cfg, ss, bs, es, logger)
+	eb := types.NewEventBus()
+	eb.SetLogger(logger.With("module", "events"))
+	is := indexer.NewIndexerService(es, eb)
+	is.SetLogger(logger.With("module", "txindex"))
+	return &Inspector{
+		routes:         routes,
+		config:         cfg,
+		logger:         logger,
+		eventBus:       eb,
+		indexerService: is,
+	}
+}
+
+// NewFromConfig constructs an Inspector using the values defined in the passed-in config.
+func NewFromConfig(cfg *config.Config) (*Inspector, error) {
+	bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
+	if err != nil {
+		return nil, err
+	}
+	bs := store.NewBlockStore(bsDB)
+	sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
+	if err != nil {
+		return nil, err
+	}
+	genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
+	if err != nil {
+		return nil, err
+	}
+	sinks, err := sink.EventSinksFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
+	if err != nil {
+		return nil, err
+	}
+	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
+	ss := state.NewStore(sDB)
+	return New(cfg.RPC, bs, ss, sinks, logger), nil
+}
+
+// Run starts the Inspector servers and blocks until the servers shut down. The passed
+// in context is used to control the lifecycle of the servers.
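+//
+// A minimal sketch of the expected call pattern, assuming cfg is a *config.Config
+// that has already been loaded for the failed node:
+//
+//	ins, err := NewFromConfig(cfg)
+//	if err != nil {
+//		// handle the error
+//	}
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	// Run blocks until the context is canceled or one of the servers fails.
+//	if err := ins.Run(ctx); err != nil {
+//		// inspect or log the error
+//	}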
+func (ins *Inspector) Run(ctx context.Context) error { + err := ins.eventBus.Start() + if err != nil { + return fmt.Errorf("error starting event bus: %s", err) + } + defer func() { + err := ins.eventBus.Stop() + if err != nil { + ins.logger.Error("event bus stopped with error", "err", err) + } + }() + err = ins.indexerService.Start() + if err != nil { + return fmt.Errorf("error starting indexer service: %s", err) + } + defer func() { + err := ins.indexerService.Stop() + if err != nil { + ins.logger.Error("indexer service stopped with error", "err", err) + } + }() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) +} + +func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error { + g, tctx := errgroup.WithContext(ctx) + listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ") + rh := rpc.Handler(cfg, routes, logger) + for _, listenerAddr := range listenAddrs { + server := rpc.Server{ + Logger: logger, + Config: cfg, + Handler: rh, + Addr: listenerAddr, + } + if cfg.IsTLSEnabled() { + keyFile := cfg.KeyFile() + certFile := cfg.CertFile() + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTPS server starting", "address", listenerAddr, + "certfile", certFile, "keyfile", keyFile) + err := server.ListenAndServeTLS(tctx, certFile, keyFile) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTPS server stopped", "address", listenerAddr) + return nil + }) + } else { + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTP server starting", "address", listenerAddr) + err := server.ListenAndServe(tctx) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTP server stopped", "address", listenerAddr) + return nil + }) + } + } + return g.Wait() +} diff --git a/inspect/inspect_test.go b/inspect/inspect_test.go new file mode 100644 index 000000000..c2a1df571 --- /dev/null +++ b/inspect/inspect_test.go @@ -0,0 +1,583 @@ +package inspect_test + +import ( + "context" + "fmt" + "net" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + abcitypes "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/proto/tendermint/state" + httpclient "github.com/tendermint/tendermint/rpc/client/http" + "github.com/tendermint/tendermint/state/indexer" + indexermocks "github.com/tendermint/tendermint/state/indexer/mocks" + statemocks "github.com/tendermint/tendermint/state/mocks" + "github.com/tendermint/tendermint/types" +) + +func TestInspectConstructor(t *testing.T) { + cfg := config.ResetTestRoot("test") + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + d, err := inspect.NewFromConfig(cfg) + require.NoError(t, err) + require.NotNil(t, d) + }) + +} + +func TestInspectRun(t *testing.T) { + cfg := config.ResetTestRoot("test") + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + d, err := inspect.NewFromConfig(cfg) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + stoppedWG := &sync.WaitGroup{} + stoppedWG.Add(1) + go func() { + 
require.NoError(t, d.Run(ctx)) + stoppedWG.Done() + }() + cancel() + stoppedWG.Wait() + }) + +} + +func TestBlock(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = []byte("test hash") + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}) + blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + resultBlock, err := cli.Block(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, testBlock.Height, resultBlock.Block.Height) + require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestTxSearch(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash)) + testTxResult := &abcitypes.TxResult{ + Height: 1, + Index: 100, + Tx: testTx, + } + + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + eventSinkMock.On("SearchTxEvents", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). + Return([]*abcitypes.TxResult{testTxResult}, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + var page = 1 + resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + require.NoError(t, err) + require.Len(t, resultTxSearch.Txs, 1) + require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) + + cancel() + wg.Wait() + + eventSinkMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestTx(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + eventSinkMock.On("GetTxByHash", testHash).Return(&abcitypes.TxResult{ + Tx: testTx, + }, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + res, err := cli.Tx(context.Background(), testHash, false) + require.NoError(t, err) + require.Equal(t, types.Tx(testTx), res.Tx) + + cancel() + wg.Wait() + + eventSinkMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestConsensusParams(t *testing.T) { + testHeight := int64(1) + testMaxGas := int64(55) + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{ + Block: types.BlockParams{ + MaxGas: testMaxGas, + }, + }, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + params, err := cli.ConsensusParams(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockResults(t *testing.T) { + testHeight := int64(1) + testGasUsed := int64(100) + stateStoreMock := &statemocks.Store{} + // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ + DeliverTxs: []*abcitypes.ResponseDeliverTx{ + { + GasUsed: testGasUsed, + }, + }, + EndBlock: &abcitypes.ResponseEndBlock{}, + BeginBlock: &abcitypes.ResponseBeginBlock{}, + }, nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockResults(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, res.TotalGasUsed, testGasUsed) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestCommit(t *testing.T) { + testHeight := int64(1) + testRound := int32(101) + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil) + blockStoreMock.On("LoadSeenCommit").Return(&types.Commit{ + Height: testHeight, + Round: testRound, + }, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.Commit(context.Background(), &testHeight) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, res.SignedHeader.Commit.Round, testRound) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockByHash(t *testing.T) { + testHeight := int64(1) + testHash := []byte("test hash") + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testHash + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testHash, + }, + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockByHash(context.Background(), testHash) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, []byte(res.BlockID.Hash), testHash) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockchain(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlockHash := []byte("test hash") + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testBlockHash + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockchainInfo(context.Background(), 0, 100) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestValidators(t *testing.T) { + testHeight := int64(1) + testVotingPower := int64(100) + testValidators := types.ValidatorSet{ + Validators: []*types.Validator{ + { + VotingPower: testVotingPower, + }, + }, + } + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testVotingPower, res.Validators[0].VotingPower) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockSearch(t *testing.T) { + testHeight := int64(1) + testBlockHash := []byte("test hash") + testQuery := "block.height = 1" + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{ + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + eventSinkMock.On("SearchBlockEvents", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). + Return([]int64{testHeight}, nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + testOrderBy := "desc" + res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func requireConnect(t testing.TB, addr string, retries int) { + parts := strings.SplitN(addr, "://", 2) + if len(parts) != 2 { + t.Fatalf("malformed address to dial: %s", addr) + } + var err error + for i := 0; i < retries; i++ { + var conn net.Conn + conn, err = net.Dial(parts[0], parts[1]) + if err == nil { + conn.Close() + return + } + // FIXME attempt to yield and let the other goroutine continue execution. + time.Sleep(time.Microsecond * 100) + } + t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err) +} diff --git a/inspect/rpc/rpc.go b/inspect/rpc/rpc.go new file mode 100644 index 000000000..76dcda4eb --- /dev/null +++ b/inspect/rpc/rpc.go @@ -0,0 +1,143 @@ +package rpc + +import ( + "context" + "net/http" + "time" + + "github.com/rs/cors" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/rpc/core" + "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/types" +) + +// Server defines parameters for running an Inspector rpc server. +type Server struct { + Addr string // TCP address to listen on, ":http" if empty + Handler http.Handler + Logger log.Logger + Config *config.RPCConfig +} + +// Routes returns the set of routes used by the Inspector server. +// +//nolint: lll +func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, es []indexer.EventSink, logger log.Logger) core.RoutesMap { + env := &core.Environment{ + Config: cfg, + EventSinks: es, + StateStore: s, + BlockStore: bs, + ConsensusReactor: waitSyncCheckerImpl{}, + Logger: logger, + } + return core.RoutesMap{ + "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), + "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height", true), + "block": server.NewRPCFunc(env.Block, "height", true), + "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash", true), + "block_results": server.NewRPCFunc(env.BlockResults, "height", true), + "commit": server.NewRPCFunc(env.Commit, "height", true), + "validators": server.NewRPCFunc(env.Validators, "height,page,per_page", true), + "tx": server.NewRPCFunc(env.Tx, "hash,prove", true), + "tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), + "block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), + } +} + +// Handler returns the http.Handler configured for use with an Inspector server. Handler +// registers the routes on the http.Handler and also registers the websocket handler +// and the CORS handler if specified by the configuration options. 
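+//
+// A sketch of wiring a Server together by hand, assuming cfg (*config.RPCConfig with a
+// single listen address), stateStore, blockStore, eventSinks, logger, and ctx have
+// already been constructed; this mirrors what the inspect package does when it starts
+// its RPC servers:
+//
+//	routes := Routes(*cfg, stateStore, blockStore, eventSinks, logger)
+//	h := Handler(cfg, routes, logger)
+//	srv := &Server{Addr: cfg.ListenAddress, Handler: h, Logger: logger, Config: cfg}
+//	// ListenAndServe blocks until ctx is canceled or the listener closes.
+//	err := srv.ListenAndServe(ctx)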
+func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler { + mux := http.NewServeMux() + wmLogger := logger.With("protocol", "websocket") + + var eventBus types.EventBusSubscriber + + websocketDisconnectFn := func(remoteAddr string) { + err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != pubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + } + wm := server.NewWebsocketManager(routes, + server.OnDisconnect(websocketDisconnectFn), + server.ReadLimit(rpcConfig.MaxBodyBytes)) + wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + + server.RegisterRPCFuncs(mux, routes, logger) + var rootHandler http.Handler = mux + if rpcConfig.IsCorsEnabled() { + rootHandler = addCORSHandler(rpcConfig, mux) + } + return rootHandler +} + +func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: rpcConfig.CORSAllowedOrigins, + AllowedMethods: rpcConfig.CORSAllowedMethods, + AllowedHeaders: rpcConfig.CORSAllowedHeaders, + }) + h = corsMiddleware.Handler(h) + return h +} + +type waitSyncCheckerImpl struct{} + +func (waitSyncCheckerImpl) WaitSync() bool { + return false +} + +func (waitSyncCheckerImpl) GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) { + return nil, false +} + +// ListenAndServe listens on the address specified in srv.Addr and handles any +// incoming requests over HTTP using the Inspector rpc handler specified on the server. +func (srv *Server) ListenAndServe(ctx context.Context) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) +} + +// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles +// incoming requests over HTTPS using the Inspector rpc handler specified on the server. +func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) +} + +func serverRPCConfig(r *config.RPCConfig) *server.Config { + cfg := server.DefaultConfig() + cfg.MaxBodyBytes = r.MaxBodyBytes + cfg.MaxHeaderBytes = r.MaxHeaderBytes + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second + } + return cfg +} diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index c43959808..4ddfa4edc 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -29,10 +29,10 @@ var ( // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - BlockchainChannel: { + BlockSyncChannel: { MsgType: new(bcproto.Message), Descriptor: &p2p.ChannelDescriptor{ - ID: byte(BlockchainChannel), + ID: byte(BlockSyncChannel), Priority: 5, SendQueueCapacity: 1000, RecvBufferCapacity: 1024, @@ -44,8 +44,8 @@ var ( ) const ( - // BlockchainChannel is a channel for blocks and status updates - BlockchainChannel = p2p.ChannelID(0x40) + // BlockSyncChannel is a channel for blocks and status updates + BlockSyncChannel = p2p.ChannelID(0x40) trySyncIntervalMS = 10 @@ -60,7 +60,7 @@ const ( ) type consensusReactor interface { - // For when we switch from blockchain reactor and block sync to the consensus + // For when we switch from block sync reactor to the consensus // machine. SwitchToConsensus(state sm.State, skipWAL bool) } @@ -87,17 +87,17 @@ type Reactor struct { consReactor consensusReactor blockSync *tmSync.AtomicBool - blockchainCh *p2p.Channel - // blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope - // messages that the reactor will consume in processBlockchainCh and receiving messages + blockSyncCh *p2p.Channel + // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope + // messages that the reactor will consume in processBlockSyncCh and receiving messages // from the peer updates channel and other goroutines. We do this instead of directly - // sending on blockchainCh.Out to avoid race conditions in the case where other goroutines - // send Envelopes directly to the to blockchainCh.Out channel, since processBlockchainCh - // may close the blockchainCh.Out channel at the same time that other goroutines send to - // blockchainCh.Out. - blockchainOutBridgeCh chan p2p.Envelope - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} + // sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines + // send Envelopes directly to the to blockSyncCh.Out channel, since processBlockSyncCh + // may close the blockSyncCh.Out channel at the same time that other goroutines send to + // blockSyncCh.Out. + blockSyncOutBridgeCh chan p2p.Envelope + peerUpdates *p2p.PeerUpdates + closeCh chan struct{} requestsCh <-chan BlockRequest errorsCh <-chan peerError @@ -119,7 +119,7 @@ func NewReactor( blockExec *sm.BlockExecutor, store *store.BlockStore, consReactor consensusReactor, - blockchainCh *p2p.Channel, + blockSyncCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, blockSync bool, metrics *cons.Metrics, @@ -137,23 +137,23 @@ func NewReactor( errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. 
r := &Reactor{ - initialState: state, - blockExec: blockExec, - store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), - consReactor: consReactor, - blockSync: tmSync.NewBool(blockSync), - requestsCh: requestsCh, - errorsCh: errorsCh, - blockchainCh: blockchainCh, - blockchainOutBridgeCh: make(chan p2p.Envelope), - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - metrics: metrics, - syncStartTime: time.Time{}, + initialState: state, + blockExec: blockExec, + store: store, + pool: NewBlockPool(startHeight, requestsCh, errorsCh), + consReactor: consReactor, + blockSync: tmSync.NewBool(blockSync), + requestsCh: requestsCh, + errorsCh: errorsCh, + blockSyncCh: blockSyncCh, + blockSyncOutBridgeCh: make(chan p2p.Envelope), + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + metrics: metrics, + syncStartTime: time.Time{}, } - r.BaseService = *service.NewBaseService(logger, "Blockchain", r) + r.BaseService = *service.NewBaseService(logger, "BlockSync", r) return r, nil } @@ -174,7 +174,7 @@ func (r *Reactor) OnStart() error { go r.poolRoutine(false) } - go r.processBlockchainCh() + go r.processBlockSyncCh() go r.processPeerUpdates() return nil @@ -199,7 +199,7 @@ func (r *Reactor) OnStop() { // Wait for all p2p Channels to be closed before returning. This ensures we // can easily reason about synchronization of all p2p Channels and ensure no // panics will occur. - <-r.blockchainCh.Done() + <-r.blockSyncCh.Done() <-r.peerUpdates.Done() } @@ -214,7 +214,7 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) return } - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{Block: blockProto}, } @@ -223,16 +223,16 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) } r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: peerID, Message: &bcproto.NoBlockResponse{Height: msg.Height}, } } -// handleBlockchainMessage handles envelopes sent from peers on the -// BlockchainChannel. It returns an error only if the Envelope.Message is unknown +// handleBlockSyncMessage handles envelopes sent from peers on the +// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
-func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -249,7 +249,7 @@ func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error { r.pool.AddBlock(envelope.From, block, block.Size()) case *bcproto.StatusRequest: - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: envelope.From, Message: &bcproto.StatusResponse{ Height: r.store.Height(), @@ -288,8 +288,8 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) switch chID { - case BlockchainChannel: - err = r.handleBlockchainMessage(envelope) + case BlockSyncChannel: + err = r.handleBlockSyncMessage(envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -298,30 +298,30 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } -// processBlockchainCh initiates a blocking process where we listen for and handle -// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during -// message execution will result in a PeerError being sent on the BlockchainChannel. +// processBlockSyncCh initiates a blocking process where we listen for and handle +// envelopes on the BlockSyncChannel and blockSyncOutBridgeCh. Any error encountered during +// message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. -func (r *Reactor) processBlockchainCh() { - defer r.blockchainCh.Close() +func (r *Reactor) processBlockSyncCh() { + defer r.blockSyncCh.Close() for { select { - case envelope := <-r.blockchainCh.In: - if err := r.handleMessage(r.blockchainCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID, "envelope", envelope, "err", err) - r.blockchainCh.Error <- p2p.PeerError{ + case envelope := <-r.blockSyncCh.In: + if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { + r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: envelope.From, Err: err, } } - case envelope := <-r.blockchainOutBridgeCh: - r.blockchainCh.Out <- envelope + case envelope := <-r.blockSyncOutBridgeCh: + r.blockSyncCh.Out <- envelope case <-r.closeCh: - r.Logger.Debug("stopped listening on blockchain channel; closing...") + r.Logger.Debug("stopped listening on block sync channel; closing...") return } @@ -340,7 +340,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { switch peerUpdate.Status { case p2p.PeerStatusUp: // send a status update the newly added peer - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ To: peerUpdate.NodeID, Message: &bcproto.StatusResponse{ Base: r.store.Base(), @@ -406,13 +406,13 @@ func (r *Reactor) requestRoutine() { return case request := <-r.requestsCh: - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ To: request.PeerID, Message: &bcproto.BlockRequest{Height: request.Height}, } case pErr := <-r.errorsCh: - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, } @@ -423,7 +423,7 @@ func (r 
*Reactor) requestRoutine() { go func() { defer r.poolWG.Done() - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ Broadcast: true, Message: &bcproto.StatusRequest{}, } @@ -554,14 +554,14 @@ FOR_LOOP: // NOTE: We've already removed the peer's request, but we still need // to clean up the rest. peerID := r.pool.RedoRequest(first.Height) - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: peerID, Err: err, } peerID2 := r.pool.RedoRequest(second.Height) if peerID2 != peerID { - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: peerID2, Err: err, } diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go index e038b57af..a1ddc02cd 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -32,9 +32,9 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor app map[types.NodeID]proxy.AppConns - blockchainChannels map[types.NodeID]*p2p.Channel - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + blockSyncChannels map[types.NodeID]*p2p.Channel + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates blockSync bool } @@ -53,19 +53,19 @@ func setup( "must specify at least one block height (nodes)") rts := &reactorTestSuite{ - logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - nodes: make([]types.NodeID, 0, numNodes), - reactors: make(map[types.NodeID]*Reactor, numNodes), - app: make(map[types.NodeID]proxy.AppConns, numNodes), - blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - blockSync: true, + logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), + network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + nodes: make([]types.NodeID, 0, numNodes), + reactors: make(map[types.NodeID]*Reactor, numNodes), + app: make(map[types.NodeID]proxy.AppConns, numNodes), + blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)} - rts.blockchainChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) + chDesc := p2p.ChannelDescriptor{ID: byte(BlockSyncChannel)} + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) i := 0 for nodeID := range rts.network.Nodes { @@ -161,7 +161,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, blockExec, blockStore, nil, - rts.blockchainChannels[nodeID], + rts.blockSyncChannels[nodeID], rts.peerUpdates[nodeID], rts.blockSync, cons.NopMetrics()) @@ -181,7 +181,7 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") + config := cfg.ResetTestRoot("block_sync_reactor_test") defer os.RemoveAll(config.RootDir) genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) @@ -216,7 +216,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) { } func TestReactor_SyncTime(t *testing.T) { - config 
:= cfg.ResetTestRoot("blockchain_reactor_test") + config := cfg.ResetTestRoot("block_sync_reactor_test") defer os.RemoveAll(config.RootDir) genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) @@ -239,7 +239,7 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") + config := cfg.ResetTestRoot("block_sync_reactor_test") defer os.RemoveAll(config.RootDir) genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) @@ -286,7 +286,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - config := cfg.ResetTestRoot("blockchain_reactor_test") + config := cfg.ResetTestRoot("block_sync_reactor_test") defer os.RemoveAll(config.RootDir) maxBlockHeight := int64(48) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 4da989b40..6379b71d5 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -916,8 +916,8 @@ func (cs *State) handleMsg(mi msgInfo) { "height", cs.Height, "round", cs.Round, "peer", peerID, + "msg_type", fmt.Sprintf("%T", msg), "err", err, - "msg", msg, ) } } diff --git a/internal/libs/clist/clist_property_test.go b/internal/libs/clist/clist_property_test.go new file mode 100644 index 000000000..cdc173ee5 --- /dev/null +++ b/internal/libs/clist/clist_property_test.go @@ -0,0 +1,72 @@ +package clist_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/tendermint/tendermint/internal/libs/clist" +) + +func TestCListProperties(t *testing.T) { + rapid.Check(t, rapid.Run(&clistModel{})) +} + +// clistModel is used by the rapid state machine testing framework. +// clistModel contains both the clist that is being tested and a slice of *clist.CElements +// that will be used to model the expected clist behavior. +type clistModel struct { + clist *clist.CList + + model []*clist.CElement +} + +// Init is a method used by the rapid state machine testing library. +// Init is called when the test starts to initialize the data that will be used +// in the state machine test. +func (m *clistModel) Init(t *rapid.T) { + m.clist = clist.New() + m.model = []*clist.CElement{} +} + +// PushBack defines an action that will be randomly selected across by the rapid state +// machines testing library. Every call to PushBack calls PushBack on the clist and +// performs a similar action on the model data. +func (m *clistModel) PushBack(t *rapid.T) { + value := rapid.String().Draw(t, "value").(string) + el := m.clist.PushBack(value) + m.model = append(m.model, el) +} + +// Remove defines an action that will be randomly selected across by the rapid state +// machine testing library. Every call to Remove selects an element from the model +// and calls Remove on the CList with that element. The same element is removed from +// the model to keep the objects in sync. +func (m *clistModel) Remove(t *rapid.T) { + if len(m.model) == 0 { + return + } + ix := rapid.IntRange(0, len(m.model)-1).Draw(t, "index").(int) + value := m.model[ix] + m.model = append(m.model[:ix], m.model[ix+1:]...) + m.clist.Remove(value) +} + +// Check is a method required by the rapid state machine testing library. +// Check is run after each action and is used to verify that the state of the object, +// in this case a clist.CList matches the state of the objec. 
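For readers unfamiliar with pgregory.net/rapid, here is a minimal standalone property test (a plain property rather than a state machine), assuming the same pre-generics rapid API used in this file, where Draw returns interface{} and must be type-asserted:

```go
package example_test

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
	"pgregory.net/rapid"
)

// TestRepeatLength checks one invariant over many generated inputs: the
// length of strings.Repeat(s, n) is always n*len(s). rapid generates the
// inputs and shrinks any counterexample it finds to a minimal one.
func TestRepeatLength(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		s := rapid.String().Draw(t, "s").(string)
		n := rapid.IntRange(0, 16).Draw(t, "n").(int)
		require.Equal(t, n*len(s), len(strings.Repeat(s, n)))
	})
}
```

The clistModel above uses the state-machine flavor of the same API: rapid.Run turns the exported methods into randomly sequenced actions, and the Check method that follows verifies the invariants after each step.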
+func (m *clistModel) Check(t *rapid.T) { + require.Equal(t, len(m.model), m.clist.Len()) + if len(m.model) == 0 { + return + } + require.Equal(t, m.model[0], m.clist.Front()) + require.Equal(t, m.model[len(m.model)-1], m.clist.Back()) + + iter := m.clist.Front() + for _, val := range m.model { + require.Equal(t, val, iter) + iter = iter.Next() + } +} diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index cfb7a8db2..dd8e39737 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -27,15 +27,22 @@ func (bz *HexBytes) Unmarshal(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaler interface. The hex bytes is a -// quoted hexadecimal encoded string. +// MarshalJSON implements the json.Marshaler interface. The encoding is a JSON +// quoted string of hexadecimal digits. func (bz HexBytes) MarshalJSON() ([]byte, error) { - s := strings.ToUpper(hex.EncodeToString(bz)) - jbz := make([]byte, len(s)+2) - jbz[0] = '"' - copy(jbz[1:], s) - jbz[len(jbz)-1] = '"' - return jbz, nil + size := hex.EncodedLen(len(bz)) + 2 // +2 for quotation marks + buf := make([]byte, size) + hex.Encode(buf[1:], []byte(bz)) + buf[0] = '"' + buf[size-1] = '"' + + // Ensure letter digits are capitalized. + for i := 1; i < size-1; i++ { + if buf[i] >= 'a' && buf[i] <= 'f' { + buf[i] = 'A' + (buf[i] - 'a') + } + } + return buf, nil } // UnmarshalJSON implements the json.Umarshaler interface. diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index db882f1c1..6a9ca7c3d 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -37,6 +37,7 @@ func TestJSONMarshal(t *testing.T) { {[]byte(``), `{"B1":"","B2":""}`}, {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + {[]byte("\x1a\x2b\x3c"), `{"B1":"Gis8","B2":"1A2B3C"}`}, } for i, tc := range cases { diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 54a030fe8..68d1ec941 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -231,34 +231,45 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { return err } var qs string + if args.Query != nil { qs = args.Query.String() } - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] + clientSubscriptions, err := func() (map[string]string, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return err + clientSubscriptions, ok := s.subscriptions[args.Subscriber] + if args.ID != "" { + qs, ok = clientSubscriptions[args.ID] + + if ok && args.Query == nil { + var err error + args.Query, err = query.New(qs) + if err != nil { + return nil, err + } } + } else if qs != "" { + args.ID, ok = clientSubscriptions[qs] } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound + if !ok { + return nil, ErrSubscriptionNotFound + } + + return clientSubscriptions, nil + }() + + if err != nil { + return err } select { case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: s.mtx.Lock() + defer s.mtx.Unlock() delete(clientSubscriptions, args.ID) delete(clientSubscriptions, qs) @@ -266,7 +277,6 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if len(clientSubscriptions) == 0 { delete(s.subscriptions, args.Subscriber) } - s.mtx.Unlock() return nil case 
<-ctx.Done(): return ctx.Err() @@ -288,8 +298,10 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID}: s.mtx.Lock() + defer s.mtx.Unlock() + delete(s.subscriptions, clientID) - s.mtx.Unlock() + return nil case <-ctx.Done(): return ctx.Err() @@ -495,7 +507,10 @@ func (state *state) send(msg interface{}, events []types.Event) error { for clientID, subscription := range clientSubscriptions { if cap(subscription.out) == 0 { // block on unbuffered channel - subscription.out <- NewMessage(subscription.id, msg, events) + select { + case subscription.out <- NewMessage(subscription.id, msg, events): + case <-subscription.canceled: + } } else { // don't block on buffered channels select { diff --git a/libs/time/time.go b/libs/time/time.go index 022bdf574..786f9bbb4 100644 --- a/libs/time/time.go +++ b/libs/time/time.go @@ -1,7 +1,6 @@ package time import ( - "sort" "time" ) @@ -16,43 +15,3 @@ func Now() time.Time { func Canonical(t time.Time) time.Time { return t.Round(0).UTC() } - -// WeightedTime for computing a median. -type WeightedTime struct { - Time time.Time - Weight int64 -} - -// NewWeightedTime with time and weight. -func NewWeightedTime(time time.Time, weight int64) *WeightedTime { - return &WeightedTime{ - Time: time, - Weight: weight, - } -} - -// WeightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. -func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res time.Time) { - median := totalVotingPower / 2 - - sort.Slice(weightedTimes, func(i, j int) bool { - if weightedTimes[i] == nil { - return false - } - if weightedTimes[j] == nil { - return true - } - return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() - }) - - for _, weightedTime := range weightedTimes { - if weightedTime != nil { - if median <= weightedTime.Weight { - res = weightedTime.Time - break - } - median -= weightedTime.Weight - } - } - return -} diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 8f1e7bf87..6f2622588 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { } // 4) Start listening for new connections. 
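The Unsubscribe rework above moves the read-locked lookup into an immediately invoked closure so that every return path, including the error paths, releases the lock through defer rather than through manually paired RLock/RUnlock calls. A minimal sketch of that locking pattern with illustrative types (not the real pubsub Server):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	mtx  sync.RWMutex
	subs = map[string]string{"client-1": "tm.event = 'NewBlock'"}
)

// lookup scopes the read lock to the closure: defer guarantees the unlock on
// both the not-found path and the success path.
func lookup(id string) (string, error) {
	return func() (string, error) {
		mtx.RLock()
		defer mtx.RUnlock()

		q, ok := subs[id]
		if !ok {
			return "", errors.New("subscription not found")
		}
		return q, nil
	}()
}

func main() {
	fmt.Println(lookup("client-1"))
	fmt.Println(lookup("missing"))
}
```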
- listener, err := rpcserver.Listen(p.Addr, p.Config) + listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections) if err != nil { return nil, mux, err } diff --git a/node/node.go b/node/node.go index 249101a11..ebc6fcbd9 100644 --- a/node/node.go +++ b/node/node.go @@ -18,7 +18,6 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cs "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" @@ -36,7 +35,6 @@ import ( grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -70,16 +68,12 @@ type nodeImpl struct { mempool mempool.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusState *cs.State // latest consensus state consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - pexReactorV2 *pex.ReactorV2 // for exchanging peer addresses - evidenceReactor *evidence.Reactor - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application + pexReactor service.Service // for exchanging peer addresses + evidenceReactor service.Service rpcListeners []net.Listener // rpc servers - eventSinks []indexer.EventSink - indexerService *indexer.Service + indexerService service.Service + rpcEnv *rpccore.Environment prometheusSrv *http.Server } @@ -324,12 +318,12 @@ func makeNode(config *cfg.Config, stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() - } else { + if config.P2P.UseLegacy { channels = getChannelsFromShim(stateSyncReactorShim) peerUpdates = stateSyncReactorShim.PeerUpdates + } else { + channels = makeChannelsFromShims(router, statesync.ChannelShims) + peerUpdates = peerManager.Subscribe() } stateSyncReactor = statesync.NewReactor( @@ -373,45 +367,42 @@ func makeNode(config *cfg.Config, // Note we currently use the addrBook regardless at least for AddOurAddress var ( - pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 - sw *p2p.Switch - addrBook pex.AddrBook + pexReactor service.Service + sw *p2p.Switch + addrBook pex.AddrBook ) pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if config.P2P.PexReactor { - if config.P2P.DisableLegacy { - addrBook = nil - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) - if err != nil { - return nil, err - } - } else { - // setup Transport and Switch - sw = createSwitch( - config, transport, p2pMetrics, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) + if config.P2P.UseLegacy { + // setup Transport and Switch + sw = createSwitch( + config, transport, p2pMetrics, mpReactorShim, bcReactorForSwitch, + stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, 
nodeKey, p2pLogger, + ) - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) - } + err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) + } - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } + err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } - addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } + addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + } else { + addrBook = nil + pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) + if err != nil { + return nil, err } } @@ -440,19 +431,51 @@ func makeNode(config *cfg.Config, bcReactor: bcReactor, mempoolReactor: mpReactor, mempool: mp, - consensusState: csState, consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, pexReactor: pexReactor, - pexReactorV2: pexReactorV2, evidenceReactor: evReactor, - evidencePool: evPool, - proxyApp: proxyApp, indexerService: indexerService, eventBus: eventBus, - eventSinks: eventSinks, + + rpcEnv: &rpccore.Environment{ + ProxyAppQuery: proxyApp.Query(), + ProxyAppMempool: proxyApp.Mempool(), + + StateStore: stateStore, + BlockStore: blockStore, + EvidencePool: evPool, + ConsensusState: csState, + + ConsensusReactor: csReactor, + BlockSyncReactor: bcReactor.(cs.BlockSyncReactor), + + P2PPeers: sw, + PeerManager: peerManager, + + GenDoc: genDoc, + EventSinks: eventSinks, + EventBus: eventBus, + Mempool: mp, + Logger: logger.With("module", "rpc"), + Config: *config.RPC, + }, } + + // this is a terrible, because typed nil interfaces are not == + // nil, so this is just cleanup to avoid having a non-nil + // value in the RPC environment that has the semantic + // properties of nil. 
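The "typed nil interfaces" comment above refers to a standard Go gotcha: an interface value holding a nil pointer is itself non-nil, because the interface still carries type information. A standalone illustration with made-up types (not the node's real ones):

```go
package main

import "fmt"

type peerSet interface{ Len() int }

type legacySwitch struct{}

func (s *legacySwitch) Len() int { return 0 }

func main() {
	var sw *legacySwitch // nil pointer
	var peers peerSet = sw

	fmt.Println(sw == nil)    // true
	fmt.Println(peers == nil) // false: the interface carries a *legacySwitch type
}
```

That is why the cleanup below explicitly resets the RPC environment fields to an untyped nil when the corresponding subsystem is absent, so the RPC handlers can rely on plain nil checks.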
+ if sw == nil { + node.rpcEnv.P2PPeers = nil + } else if peerManager == nil { + node.rpcEnv.PeerManager = nil + } + // end hack + + node.rpcEnv.P2PTransport = node + node.BaseService = *service.NewBaseService(logger, "Node", node) return node, nil @@ -485,25 +508,6 @@ func makeSeedNode(config *cfg.Config, p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID) p2pLogger := logger.With("module", "p2p") transport := createTransport(p2pLogger, config) - sw := createSwitch( - config, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) if err != nil { @@ -517,8 +521,9 @@ func makeSeedNode(config *cfg.Config, } var ( - pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 + pexReactor service.Service + sw *p2p.Switch + addrBook pex.AddrBook ) // add the pex reactor @@ -527,13 +532,34 @@ func makeSeedNode(config *cfg.Config, // p2p stack is removed. pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if config.P2P.DisableLegacy { - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) + + if config.P2P.UseLegacy { + sw = createSwitch( + config, transport, p2pMetrics, nil, nil, + nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, + ) + + err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + } + + err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } + + addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } + + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + } else { + pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) if err != nil { return nil, err } - } else { - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } if config.RPC.PprofListenAddress != "" { @@ -555,8 +581,7 @@ func makeSeedNode(config *cfg.Config, peerManager: peerManager, router: router, - pexReactor: pexReactor, - pexReactorV2: pexReactorV2, + pexReactor: pexReactor, } node.BaseService = *service.NewBaseService(logger, "SeedNode", node) @@ -597,23 +622,20 @@ func (n *nodeImpl) OnStart() error { } n.isListening = true + n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy) - n.Logger.Info("p2p service", "legacy_enabled", !n.config.P2P.DisableLegacy) - - if n.config.P2P.DisableLegacy { - err = n.router.Start() - } else { + if n.config.P2P.UseLegacy { // Add private IDs to addrbook to block those peers being added 
n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - err = n.sw.Start() - } - if err != nil { + if err = n.sw.Start(); err != nil { + return err + } + } else if err = n.router.Start(); err != nil { return err } if n.config.Mode != cfg.ModeSeed { if n.config.BlockSync.Version == cfg.BlockSyncV0 { - // Start the real blockchain reactor separately since the switch uses the shim. if err := n.bcReactor.Start(); err != nil { return err } @@ -640,17 +662,14 @@ func (n *nodeImpl) OnStart() error { } } - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Start(); err != nil { - return err - } - } else { + if n.config.P2P.UseLegacy { // Always connect to persistent peers err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) if err != nil { return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) } - + } else if err := n.pexReactor.Start(); err != nil { + return err } // Run state sync @@ -764,20 +783,18 @@ func (n *nodeImpl) OnStop() { } } - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) - } + if err := n.pexReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } - if n.config.P2P.DisableLegacy { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } - } else { + if n.config.P2P.UseLegacy { if err := n.sw.Stop(); err != nil { n.Logger.Error("failed to stop switch", "err", err) } + } else { + if err := n.router.Stop(); err != nil { + n.Logger.Error("failed to stop router", "err", err) + } } if err := n.transport.Close(); err != nil { @@ -808,55 +825,23 @@ func (n *nodeImpl) OnStop() { } } -// ConfigureRPC makes sure RPC has all the objects it needs to operate. 
-func (n *nodeImpl) ConfigureRPC() (*rpccore.Environment, error) { - rpcCoreEnv := rpccore.Environment{ - ProxyAppQuery: n.proxyApp.Query(), - ProxyAppMempool: n.proxyApp.Mempool(), - - StateStore: n.stateStore, - BlockStore: n.blockStore, - EvidencePool: n.evidencePool, - ConsensusState: n.consensusState, - P2PPeers: n.sw, - P2PTransport: n, - - GenDoc: n.genesisDoc, - EventSinks: n.eventSinks, - ConsensusReactor: n.consensusReactor, - EventBus: n.eventBus, - Mempool: n.mempool, - - Logger: n.Logger.With("module", "rpc"), - - Config: *n.config.RPC, - BlockSyncReactor: n.bcReactor.(cs.BlockSyncReactor), - } +func (n *nodeImpl) startRPC() ([]net.Listener, error) { if n.config.Mode == cfg.ModeValidator { pubKey, err := n.privValidator.GetPubKey(context.TODO()) if pubKey == nil || err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } - rpcCoreEnv.PubKey = pubKey + n.rpcEnv.PubKey = pubKey } - if err := rpcCoreEnv.InitGenesisChunks(); err != nil { - return nil, err - } - - return &rpcCoreEnv, nil -} - -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - env, err := n.ConfigureRPC() - if err != nil { + if err := n.rpcEnv.InitGenesisChunks(); err != nil { return nil, err } listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - routes := env.GetRoutes() + routes := n.rpcEnv.GetRoutes() if n.config.RPC.Unsafe { - env.AddUnsafe(routes) + n.rpcEnv.AddUnsafe(routes) } config := rpcserver.DefaultConfig() @@ -890,7 +875,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, - config, + config.MaxOpenConnections, ) if err != nil { return nil, err @@ -948,12 +933,12 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } - listener, err := rpcserver.Listen(grpcListenAddr, config) + listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections) if err != nil { return nil, err } go func() { - if err := grpccore.StartGRPCServer(env, listener); err != nil { + if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil { n.Logger.Error("Error starting gRPC server", "err", err) } }() @@ -986,46 +971,16 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { return srv } -// Switch returns the Node's Switch. -func (n *nodeImpl) Switch() *p2p.Switch { - return n.sw -} - -// BlockStore returns the Node's BlockStore. -func (n *nodeImpl) BlockStore() *store.BlockStore { - return n.blockStore -} - -// ConsensusState returns the Node's ConsensusState. -func (n *nodeImpl) ConsensusState() *cs.State { - return n.consensusState -} - // ConsensusReactor returns the Node's ConsensusReactor. func (n *nodeImpl) ConsensusReactor() *cs.Reactor { return n.consensusReactor } -// MempoolReactor returns the Node's mempool reactor. -func (n *nodeImpl) MempoolReactor() service.Service { - return n.mempoolReactor -} - // Mempool returns the Node's mempool. func (n *nodeImpl) Mempool() mempool.Mempool { return n.mempool } -// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. -func (n *nodeImpl) PEXReactor() *pex.Reactor { - return n.pexReactor -} - -// EvidencePool returns the Node's EvidencePool. -func (n *nodeImpl) EvidencePool() *evidence.Pool { - return n.evidencePool -} - // EventBus returns the Node's EventBus. 
func (n *nodeImpl) EventBus() *types.EventBus { return n.eventBus @@ -1042,19 +997,9 @@ func (n *nodeImpl) GenesisDoc() *types.GenesisDoc { return n.genesisDoc } -// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. -func (n *nodeImpl) ProxyApp() proxy.AppConns { - return n.proxyApp -} - -// Config returns the Node's config. -func (n *nodeImpl) Config() *cfg.Config { - return n.config -} - -// EventSinks returns the Node's event indexing sinks. -func (n *nodeImpl) EventSinks() []indexer.EventSink { - return n.eventSinks +// RPCEnvironment makes sure RPC has all the objects it needs to operate. +func (n *nodeImpl) RPCEnvironment() *rpccore.Environment { + return n.rpcEnv } //------------------------------------------------------------------------------ diff --git a/node/node_test.go b/node/node_test.go index 1f2645144..6925008a6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -39,6 +39,7 @@ import ( func TestNodeStartStop(t *testing.T) { config := cfg.ResetTestRoot("node_node_test") + defer os.RemoveAll(config.RootDir) // create & start node @@ -49,8 +50,6 @@ func TestNodeStartStop(t *testing.T) { n, ok := ns.(*nodeImpl) require.True(t, ok) - t.Logf("Started node %v", n.sw.NodeInfo()) - // wait for the node to produce a block blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) require.NoError(t, err) @@ -509,36 +508,50 @@ func TestNodeSetEventSink(t *testing.T) { config := cfg.ResetTestRoot("node_app_version_test") defer os.RemoveAll(config.RootDir) - n := getTestNode(t, config, log.TestingLogger()) + logger := log.TestingLogger() + setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink { + eventBus, err := createAndStartEventBus(logger) + require.NoError(t, err) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.KV, n.eventSinks[0].Type()) + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + require.NoError(t, err) + + indexService, eventSinks, err := createAndStartIndexerService(config, + cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) + return eventSinks + } + + eventSinks := setupTest(t, config) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.KV, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"null"} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"null", "kv"} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"kvv"} - ns, err := newDefaultNode(config, log.TestingLogger()) + ns, err := newDefaultNode(config, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("unsupported event sink type"), err) config.TxIndex.Indexer = []string{} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) 
config.TxIndex.Indexer = []string{"psql"} - ns, err = newDefaultNode(config, log.TestingLogger()) + ns, err = newDefaultNode(config, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) @@ -546,46 +559,46 @@ func TestNodeSetEventSink(t *testing.T) { config.TxIndex.Indexer = []string{"psql"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - n.OnStop() + eventSinks = setupTest(t, config) + + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"psql", "kv"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 2, len(n.eventSinks)) + eventSinks = setupTest(t, config) + + assert.Equal(t, 2, len(eventSinks)) // we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks. - if n.eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type()) + if eventSinks[0].Type() == indexer.KV { + assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) } else { - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - assert.Equal(t, indexer.KV, n.eventSinks[1].Type()) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) + assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - n.OnStop() config.TxIndex.Indexer = []string{"kv", "psql"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 2, len(n.eventSinks)) - if n.eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type()) + eventSinks = setupTest(t, config) + + assert.Equal(t, 2, len(eventSinks)) + if eventSinks[0].Type() == indexer.KV { + assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) } else { - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - assert.Equal(t, indexer.KV, n.eventSinks[1].Type()) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) + assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - n.OnStop() var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") config.TxIndex.Indexer = []string{"psql", "kv", "Kv"} config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, log.TestingLogger()) + _, err = newDefaultNode(config, logger) require.Error(t, err) assert.Equal(t, e, err) config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, log.TestingLogger()) + _, err = newDefaultNode(config, logger) require.Error(t, err) assert.Equal(t, e, err) } diff --git a/node/setup.go b/node/setup.go index a128568ce..1a7c1b3b2 100644 --- a/node/setup.go +++ b/node/setup.go @@ -8,7 +8,6 @@ import ( "math" "net" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port - "strings" "time" dbm "github.com/tendermint/tm-db" @@ -33,9 +32,7 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" - kv "github.com/tendermint/tendermint/state/indexer/sink/kv" - null "github.com/tendermint/tendermint/state/indexer/sink/null" - psql "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/state/indexer/sink" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" @@ -78,56 +75,9 
@@ func createAndStartIndexerService( logger log.Logger, chainID string, ) (*indexer.Service, []indexer.EventSink, error) { - - eventSinks := []indexer.EventSink{} - - // check for duplicated sinks - sinks := map[string]bool{} - for _, s := range config.TxIndex.Indexer { - sl := strings.ToLower(s) - if sinks[sl] { - return nil, nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml") - } - - sinks[sl] = true - } - -loop: - for k := range sinks { - switch k { - case string(indexer.NULL): - // When we see null in the config, the eventsinks will be reset with the - // nullEventSink. - eventSinks = []indexer.EventSink{null.NewEventSink()} - break loop - - case string(indexer.KV): - store, err := dbProvider(&cfg.DBContext{ID: "tx_index", Config: config}) - if err != nil { - return nil, nil, err - } - - eventSinks = append(eventSinks, kv.NewEventSink(store)) - - case string(indexer.PSQL): - conn := config.TxIndex.PsqlConn - if conn == "" { - return nil, nil, errors.New("the psql connection settings cannot be empty") - } - - es, _, err := psql.NewEventSink(conn, chainID) - if err != nil { - return nil, nil, err - } - eventSinks = append(eventSinks, es) - - default: - return nil, nil, errors.New("unsupported event sink type") - } - } - - if len(eventSinks) == 0 { - eventSinks = []indexer.EventSink{null.NewEventSink()} + eventSinks, err := sink.EventSinksFromConfig(config, dbProvider, chainID) + if err != nil { + return nil, nil, err } indexerService := indexer.NewIndexerService(eventSinks, eventBus) @@ -216,12 +166,12 @@ func createMempoolReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, channelShims) - peerUpdates = peerManager.Subscribe() - } else { + if config.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates + } else { + channels = makeChannelsFromShims(router, channelShims) + peerUpdates = peerManager.Subscribe() } switch config.Mempool.Version { @@ -310,12 +260,12 @@ func createEvidenceReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, evidence.ChannelShims) - peerUpdates = peerManager.Subscribe() - } else { + if config.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates + } else { + channels = makeChannelsFromShims(router, evidence.ChannelShims) + peerUpdates = peerManager.Subscribe() } evidenceReactor := evidence.NewReactor( @@ -352,17 +302,17 @@ func createBlockchainReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } else { + if config.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates + } else { + channels = makeChannelsFromShims(router, bcv0.ChannelShims) + peerUpdates = peerManager.Subscribe() } reactor, err := bcv0.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, - channels[bcv0.BlockchainChannel], peerUpdates, blockSync, + channels[bcv0.BlockSyncChannel], peerUpdates, blockSync, metrics, ) if err != nil { @@ -416,12 +366,12 @@ func createConsensusReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, cs.ChannelShims) - peerUpdates = peerManager.Subscribe() - } else { + if config.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates + } 
else { + channels = makeChannelsFromShims(router, cs.ChannelShims) + peerUpdates = peerManager.Subscribe() } reactor := cs.NewReactor( @@ -700,7 +650,7 @@ func createPEXReactorV2( logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, -) (*pex.ReactorV2, error) { +) (service.Service, error) { channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128) if err != nil { @@ -727,7 +677,7 @@ func makeNodeInfo( var bcChannel byte switch config.BlockSync.Version { case cfg.BlockSyncV0: - bcChannel = byte(bcv0.BlockchainChannel) + bcChannel = byte(bcv0.BlockSyncChannel) case cfg.BlockSyncV2: bcChannel = bcv2.BlockchainChannel diff --git a/release_notes.md b/release_notes.md deleted file mode 100644 index a537871c5..000000000 --- a/release_notes.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 0663ebf67..d752e6a93 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -2,6 +2,7 @@ package local import ( "context" + "errors" "fmt" "time" @@ -46,15 +47,15 @@ type Local struct { // NodeService describes the portion of the node interface that the // local RPC client constructor needs to build a local client. type NodeService interface { - ConfigureRPC() (*rpccore.Environment, error) + RPCEnvironment() *rpccore.Environment EventBus() *types.EventBus } // New configures a client that calls the Node directly. func New(node NodeService) (*Local, error) { - env, err := node.ConfigureRPC() - if err != nil { - return nil, err + env := node.RPCEnvironment() + if env == nil { + return nil, errors.New("rpc is nil") } return &Local{ EventBus: node.EventBus(), diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 613eaec8b..ce705ba90 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -26,7 +26,7 @@ func (env *Environment) ABCIQuery( if err != nil { return nil, err } - env.Logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) + return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } @@ -37,5 +37,6 @@ func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, if err != nil { return nil, err } + return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 1767c4b35..b067e1063 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -1,6 +1,8 @@ package core import ( + "errors" + cm "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -54,24 +56,56 @@ func (env *Environment) Validators( // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. 
- peers := env.P2PPeers.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) - if !ok { // peer does not have a state yet - continue + + var peerStates []ctypes.PeerStateInfo + switch { + case env.P2PPeers != nil: + peers := env.P2PPeers.Peers().List() + peerStates = make([]ctypes.PeerStateInfo, 0, len(peers)) + for _, peer := range peers { + peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) + if !ok { // peer does not have a state yet + continue + } + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + peerStates = append(peerStates, ctypes.PeerStateInfo{ + // Peer basic info. + NodeAddress: peer.SocketAddr().String(), + // Peer consensus state. + PeerState: peerStateJSON, + }) } - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - peerStates[i] = ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: peer.SocketAddr().String(), - // Peer consensus state. - PeerState: peerStateJSON, + case env.PeerManager != nil: + peers := env.PeerManager.Peers() + peerStates = make([]ctypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { + continue + } + + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + + addr := env.PeerManager.Addresses(pid) + if len(addr) >= 1 { + peerStates = append(peerStates, ctypes.PeerStateInfo{ + // Peer basic info. + NodeAddress: addr[0].String(), + // Peer consensus state. + PeerState: peerStateJSON, + }) + } } + default: + return nil, errors.New("no peer system configured") } + // Get self round state. roundState, err := env.ConsensusState.GetRoundStateJSON() if err != nil { diff --git a/rpc/core/env.go b/rpc/core/env.go index eb7232c01..7069bc4d4 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -36,7 +36,7 @@ const ( //---------------------------------------------- // These interfaces are used by RPC and must be thread safe -type Consensus interface { +type consensusState interface { GetState() sm.State GetValidators() (int64, []*types.Validator) GetLastHeight() int64 @@ -58,6 +58,16 @@ type peers interface { Peers() p2p.IPeerSet } +type consensusReactor interface { + WaitSync() bool + GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) +} + +type peerManager interface { + Peers() []types.NodeID + Addresses(types.NodeID) []p2p.NodeAddress +} + //---------------------------------------------- // Environment contains objects and interfaces used by the RPC. It is expected // to be setup once during startup. 
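The small consensusReactor and peerManager interfaces added above (and the renamed consensusState) follow the consumer-defined interface convention: rpc/core declares only the methods it actually calls, and any concrete type with those methods, from either p2p stack, satisfies the interface implicitly. A toy sketch of the idea (types are illustrative, not the real ones):

```go
package main

import (
	"errors"
	"fmt"
)

// peerLister is the narrow, consumer-side view the RPC layer needs.
type peerLister interface {
	Peers() []string
}

type legacySwitch struct{}   // stands in for the legacy p2p switch
type newPeerManager struct{} // stands in for the new peer manager

func (legacySwitch) Peers() []string   { return []string{"a", "b"} }
func (newPeerManager) Peers() []string { return []string{"c"} }

func countPeers(p peerLister) (int, error) {
	if p == nil {
		return 0, errors.New("no peer system configured")
	}
	return len(p.Peers()), nil
}

func main() {
	fmt.Println(countPeers(legacySwitch{}))
	fmt.Println(countPeers(newPeerManager{}))
	fmt.Println(countPeers(nil))
}
```

This is also why DumpConsensusState and NetInfo above can branch on whichever of P2PPeers or PeerManager is non-nil and fall back to an error when neither is configured.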
@@ -67,18 +77,23 @@ type Environment struct { ProxyAppMempool proxy.AppConnMempool // interfaces defined in types and above - StateStore sm.Store - BlockStore sm.BlockStore - EvidencePool sm.EvidencePool - ConsensusState Consensus - P2PPeers peers - P2PTransport transport + StateStore sm.Store + BlockStore sm.BlockStore + EvidencePool sm.EvidencePool + ConsensusState consensusState + ConsensusReactor consensusReactor + P2PPeers peers + + // Legacy p2p stack + P2PTransport transport + + // interfaces for new p2p interfaces + PeerManager peerManager // objects PubKey crypto.PubKey GenDoc *types.GenesisDoc // cache the genesis structure EventSinks []indexer.EventSink - ConsensusReactor *consensus.Reactor EventBus *types.EventBus // thread safe Mempool mempl.Mempool BlockSyncReactor consensus.BlockSyncReactor diff --git a/rpc/core/net.go b/rpc/core/net.go index edcf8fffa..5b1672e26 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -13,19 +13,35 @@ import ( // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - peersList := env.P2PPeers.Peers().List() - peers := make([]ctypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, ctypes.Peer{ - NodeInfo: peer.NodeInfo(), - IsOutbound: peer.IsOutbound(), - ConnectionStatus: peer.Status(), - RemoteIP: peer.RemoteIP().String(), - }) + var peers []ctypes.Peer + + switch { + case env.P2PPeers != nil: + peersList := env.P2PPeers.Peers().List() + peers = make([]ctypes.Peer, 0, len(peersList)) + for _, peer := range peersList { + peers = append(peers, ctypes.Peer{ + ID: peer.ID(), + URL: peer.SocketAddr().String(), + }) + } + case env.PeerManager != nil: + peerList := env.PeerManager.Peers() + for _, peer := range peerList { + addrs := env.PeerManager.Addresses(peer) + if len(addrs) == 0 { + continue + } + + peers = append(peers, ctypes.Peer{ + ID: peer, + URL: addrs[0].String(), + }) + } + default: + return nil, errors.New("peer management system does not support NetInfo responses") } - // TODO: Should we include PersistentPeers and Seeds in here? - // PRO: useful info - // CON: privacy + return &ctypes.ResultNetInfo{ Listening: env.P2PTransport.IsListening(), Listeners: env.P2PTransport.Listeners(), @@ -36,6 +52,10 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). 
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { + if env.P2PPeers == nil { + return nil, errors.New("peer management system does not support this operation") + } + if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest) } @@ -53,6 +73,10 @@ func (env *Environment) UnsafeDialPeers( peers []string, persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { + if env.P2PPeers == nil { + return nil, errors.New("peer management system does not support this operation") + } + if len(peers) == 0 { return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest) } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index a49e3c0d9..caa9b8732 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -7,7 +7,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -145,10 +144,8 @@ type ResultDialPeers struct { // A peer type Peer struct { - NodeInfo types.NodeInfo `json:"node_info"` - IsOutbound bool `json:"is_outbound"` - ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` - RemoteIP string `json:"remote_ip"` + ID types.NodeID `json:"node_id"` + URL string `json:"url"` } // Validators for a height. diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 6e0c03f00..b5e422280 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -110,7 +110,7 @@ func setup() { wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) config := server.DefaultConfig() - listener1, err := server.Listen(tcpAddr, config) + listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections) if err != nil { panic(err) } @@ -126,7 +126,7 @@ func setup() { wm = server.NewWebsocketManager(Routes) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener2, err := server.Listen(unixAddr, config) + listener2, err := server.Listen(unixAddr, config.MaxOpenConnections) if err != nil { panic(err) } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index c21c71c49..549671241 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -261,7 +261,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Listen starts a new net.Listener on the given address. // It returns an error if the address is invalid or the call to Listen() fails. 
-func Listen(addr string, config *Config) (listener net.Listener, err error) { +func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, fmt.Errorf( @@ -274,8 +274,8 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) { if err != nil { return nil, fmt.Errorf("failed to listen on %v: %v", addr, err) } - if config.MaxOpenConnections > 0 { - listener = netutil.LimitListener(listener, config.MaxOpenConnections) + if maxOpenConnections > 0 { + listener = netutil.LimitListener(listener, maxOpenConnections) } return listener, nil diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index e7c517cde..823719e41 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -39,8 +39,7 @@ func TestMaxOpenConnections(t *testing.T) { fmt.Fprint(w, "some body") }) config := DefaultConfig() - config.MaxOpenConnections = max - l, err := Listen("tcp://127.0.0.1:0", config) + l, err := Listen("tcp://127.0.0.1:0", max) require.NoError(t, err) defer l.Close() go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index 1c949571f..d348e1639 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -33,7 +33,7 @@ func main() { rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() - listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config) + listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { tmos.Exit(err.Error()) } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index bb35d34ac..535320b3f 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -837,7 +837,7 @@ paths: - Info description: | Get genesis document in a paginated/chunked format to make it - easier to iterate through larger gensis structures. + easier to iterate through larger genesis structures. parameters: - in: query name: chunkID @@ -1042,7 +1042,7 @@ paths: - Info responses: "200": - description: List of unconfirmed transactions + description: List of transactions content: application/json: schema: @@ -1132,10 +1132,10 @@ paths: tags: - Info description: | - Get a trasasction + Get a transaction responses: "200": - description: Get a transaction` + description: Get a transaction content: application/json: schema: @@ -1476,16 +1476,12 @@ components: Peer: type: object properties: - node_info: - $ref: "#/components/schemas/NodeInfo" - is_outbound: - type: boolean - example: true - connection_status: - $ref: "#/components/schemas/ConnectionStatus" - remote_ip: + node_id: type: string - example: "95.179.155.35" + example: "" + url: + type: string + example: "@95.179.155.35:2385>" NetInfo: type: object properties: @@ -1946,14 +1942,14 @@ components: - "chunk" - "total" - "data" - properties: + properties: chunk: type: integer example: 0 total: type: integer example: 1 - data: + data: type: string example: "Z2VuZXNpcwo=" diff --git a/state/indexer/doc.go b/state/indexer/doc.go new file mode 100644 index 000000000..61adbabac --- /dev/null +++ b/state/indexer/doc.go @@ -0,0 +1,72 @@ +/* +Package indexer defines Tendermint's block and transaction event indexing logic. + +Tendermint supports two primary means of block and transaction event indexing: + +1. 
A key-value sink via an embedded database with a proprietary query language. +2. A Postgres-based sink. + +An ABCI application can emit events during block and transaction execution in the form + + .= + +for example "transfer.amount=10000". + +An operator can enable one or both of the supported indexing sinks via the +'tx-index.indexer' Tendermint configuration. + +Example: + + [tx-index] + indexer = ["kv", "psql"] + +If an operator wants to completely disable indexing, they may simply just provide +the "null" sink option in the configuration. All other sinks will be ignored if +"null" is provided. + +If indexing is enabled, the indexer.Service will iterate over all enabled sinks +and invoke block and transaction indexing via the appropriate IndexBlockEvents +and IndexTxEvents methods. + +Note, the "kv" sink is considered deprecated and its query functionality is very +limited, but does allow users to directly query for block and transaction events +against Tendermint's RPC. Instead, operators are encouraged to use the "psql" +indexing sink when more complex queries are required and for reliability purposes +as PostgreSQL can scale. + +Prior to starting Tendermint with the "psql" indexing sink enabled, operators +must ensure the following: + +1. The "psql" indexing sink is provided in Tendermint's configuration. +2. A 'tx-index.psql-conn' value is provided that contains the PostgreSQL connection URI. +3. The block and transaction event schemas have been created in the PostgreSQL database. + +Tendermint provides the block and transaction event schemas in the following +path: state/indexer/sink/psql/schema.sql + +To create the schema in a PostgreSQL database, perform the schema query +manually or invoke schema creation via the CLI: + + $ psql -f state/indexer/sink/psql/schema.sql + +The "psql" indexing sink prohibits queries via RPC. When using a PostgreSQL sink, +queries can and should be made directly against the database using SQL. + +The following are some example SQL queries against the database schema: + +* Query for all transaction events for a given transaction hash: + + SELECT * FROM tx_events WHERE hash = '3E7D1F...'; + +* Query for all transaction events for a given block height: + + SELECT * FROM tx_events WHERE height = 25; + +* Query for transaction events that have a given type (i.e. value wildcard): + + SELECT * FROM tx_events WHERE key LIKE '%transfer.recipient%'; + +Note that if a complete abci.TxResult is needed, you will need to join "tx_events" with +"tx_results" via a foreign key, to obtain contains the raw protobuf-encoded abci.TxResult. 
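The closing note above amounts to: tx_events rows carry only the decomposed event attributes, so recovering a full abci.TxResult requires joining against tx_results, whose rows hold the raw protobuf-encoded result. A hypothetical query issued through database/sql; the tx_result_id and tx_result column names are assumptions inferred from the schema description here, not verified against schema.sql:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "postgresql://user:pass@localhost:5432/tm?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Join tx_events to tx_results to recover the protobuf-encoded TxResult
	// for a given transaction hash.
	const q = `
		SELECT tx_results.tx_result
		FROM tx_events
		JOIN tx_results ON tx_events.tx_result_id = tx_results.id
		WHERE tx_events.hash = $1`

	var raw []byte
	if err := db.QueryRow(q, "3E7D1F...").Scan(&raw); err != nil {
		log.Fatal(err)
	}
	log.Printf("raw abci.TxResult: %d bytes", len(raw))
}
```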
+*/ +package indexer diff --git a/state/indexer/indexer_service.go b/state/indexer/indexer_service.go index a429b66a0..39a1847f8 100644 --- a/state/indexer/indexer_service.go +++ b/state/indexer/indexer_service.go @@ -51,43 +51,47 @@ func (is *Service) OnStart() error { go func() { for { - msg := <-blockHeadersSub.Out() + select { + case <-blockHeadersSub.Canceled(): + return + case msg := <-blockHeadersSub.Out(): - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) + eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) + height := eventDataHeader.Header.Height + batch := NewBatch(eventDataHeader.NumTxs) - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult + for i := int64(0); i < eventDataHeader.NumTxs; i++ { + msg2 := <-txsSub.Out() + txResult := msg2.Data().(types.EventDataTx).TxResult - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } - - if !IndexingEnabled(is.eventSinks) { - continue - } - - for _, sink := range is.eventSinks { - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) + if err = batch.Add(&txResult); err != nil { + is.Logger.Error( + "failed to add tx to batch", + "height", height, + "index", txResult.Index, + "err", err, + ) + } } - if len(batch.Ops) > 0 { - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) + if !IndexingEnabled(is.eventSinks) { + continue + } + + for _, sink := range is.eventSinks { + if err := sink.IndexBlockEvents(eventDataHeader); err != nil { + is.Logger.Error("failed to index block", "height", height, "err", err) } else { - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) + is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) + } + + if len(batch.Ops) > 0 { + err := sink.IndexTxEvents(batch.Ops) + if err != nil { + is.Logger.Error("failed to index block txs", "height", height, "err", err) + } else { + is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) + } } } } diff --git a/state/indexer/indexer_service_test.go b/state/indexer/indexer_service_test.go index 68a00afb5..457ed065a 100644 --- a/state/indexer/indexer_service_test.go +++ b/state/indexer/indexer_service_test.go @@ -164,19 +164,16 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName) - if err = pool.Retry(func() error { - var err error - - pSink, psqldb, err = psql.NewEventSink(conn, "test-chainID") - + assert.NoError(t, pool.Retry(func() error { + sink, err := psql.NewEventSink(conn, "test-chainID") if err != nil { return err } + pSink = sink + psqldb = sink.DB() return psqldb.Ping() - }); err != nil { - assert.Error(t, err) - } + })) resetDB(t) diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index efb539e0b..8bd378f4a 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -1,3 +1,4 @@ +// Package psql implements an event sink backed by a PostgreSQL database. 
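+//
+// A minimal usage sketch follows; error handling is elided, the connection
+// string is illustrative, and blockHeaderEvent stands in for a
+// types.EventDataNewBlockHeader received from the event bus:
+//
+//	sink, err := psql.NewEventSink("postgresql://user:pass@localhost/tm", "my-chain-id")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer sink.Stop()
+//
+//	if err := sink.IndexBlockEvents(blockHeaderEvent); err != nil {
+//		// handle error
+//	}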
package psql import ( @@ -24,28 +25,38 @@ const ( DriverName = "postgres" ) -// EventSink is an indexer backend providing the tx/block index services. +// EventSink is an indexer backend providing the tx/block index services. This +// implementation stores records in a PostgreSQL database using the schema +// defined in state/indexer/sink/psql/schema.sql. type EventSink struct { store *sql.DB chainID string } -func NewEventSink(connStr string, chainID string) (indexer.EventSink, *sql.DB, error) { +// NewEventSink constructs an event sink associated with the PostgreSQL +// database specified by connStr. Events written to the sink are attributed to +// the specified chainID. +func NewEventSink(connStr, chainID string) (*EventSink, error) { db, err := sql.Open(DriverName, connStr) if err != nil { - return nil, nil, err + return nil, err } return &EventSink{ store: db, chainID: chainID, - }, db, nil + }, nil } -func (es *EventSink) Type() indexer.EventSinkType { - return indexer.PSQL -} +// DB returns the underlying Postgres connection used by the sink. +// This is exported to support testing. +func (es *EventSink) DB() *sql.DB { return es.store } +// Type returns the structure type for this sink, which is Postgres. +func (es *EventSink) Type() indexer.EventSinkType { return indexer.PSQL } + +// IndexBlockEvents indexes the specified block header, part of the +// indexer.EventSink interface. func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error { sqlStmt := sq. Insert(TableEventBlock). @@ -156,18 +167,22 @@ func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error { return err } +// SearchBlockEvents is not implemented by this sink, and reports an error for all queries. func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { return nil, errors.New("block search is not supported via the postgres event sink") } +// SearchTxEvents is not implemented by this sink, and reports an error for all queries. func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { return nil, errors.New("tx search is not supported via the postgres event sink") } +// GetTxByHash is not implemented by this sink, and reports an error for all queries. func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { return nil, errors.New("getTxByHash is not supported via the postgres event sink") } +// HasBlock is not implemented by this sink, and reports an error for all queries. func (es *EventSink) HasBlock(h int64) (bool, error) { return false, errors.New("hasBlock is not supported via the postgres event sink") } @@ -206,6 +221,5 @@ func indexBlockEvents( return sqlStmt, nil } -func (es *EventSink) Stop() error { - return es.store.Close() -} +// Stop closes the underlying PostgreSQL database. 
+func (es *EventSink) Stop() error { return es.store.Close() } diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index 0df773a53..35ad7eea3 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -341,19 +341,14 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName) - if err = pool.Retry(func() error { - var err error - - _, db, err = NewEventSink(conn, chainID) - + require.NoError(t, pool.Retry(func() error { + sink, err := NewEventSink(conn, chainID) if err != nil { return err } - + db = sink.DB() // set global for test use return db.Ping() - }); err != nil { - require.NoError(t, err) - } + })) resetDB(t) diff --git a/state/indexer/sink/sink.go b/state/indexer/sink/sink.go new file mode 100644 index 000000000..f9dfa54df --- /dev/null +++ b/state/indexer/sink/sink.go @@ -0,0 +1,65 @@ +package sink + +import ( + "errors" + "strings" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/state/indexer/sink/kv" + "github.com/tendermint/tendermint/state/indexer/sink/null" + "github.com/tendermint/tendermint/state/indexer/sink/psql" +) + +// EventSinksFromConfig constructs a slice of indexer.EventSink using the provided +// configuration. +// +//nolint:lll +func EventSinksFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) ([]indexer.EventSink, error) { + if len(cfg.TxIndex.Indexer) == 0 { + return []indexer.EventSink{null.NewEventSink()}, nil + } + + // check for duplicated sinks + sinks := map[string]struct{}{} + for _, s := range cfg.TxIndex.Indexer { + sl := strings.ToLower(s) + if _, ok := sinks[sl]; ok { + return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml") + } + sinks[sl] = struct{}{} + } + eventSinks := []indexer.EventSink{} + for k := range sinks { + switch indexer.EventSinkType(k) { + case indexer.NULL: + // When we see null in the config, the eventsinks will be reset with the + // nullEventSink. + return []indexer.EventSink{null.NewEventSink()}, nil + + case indexer.KV: + store, err := dbProvider(&config.DBContext{ID: "tx_index", Config: cfg}) + if err != nil { + return nil, err + } + + eventSinks = append(eventSinks, kv.NewEventSink(store)) + + case indexer.PSQL: + conn := cfg.TxIndex.PsqlConn + if conn == "" { + return nil, errors.New("the psql connection settings cannot be empty") + } + + es, err := psql.NewEventSink(conn, chainID) + if err != nil { + return nil, err + } + eventSinks = append(eventSinks, es) + default: + return nil, errors.New("unsupported event sink type") + } + } + return eventSinks, nil + +} diff --git a/state/state.go b/state/state.go index 132a86fda..5862162d1 100644 --- a/state/state.go +++ b/state/state.go @@ -9,7 +9,6 @@ import ( "github.com/gogo/protobuf/proto" - tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/types" @@ -287,7 +286,7 @@ func (state State) MakeBlock( // the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the // computed value. 
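+// For example (mirroring the cases in state/time_test.go): given three commit
+// timestamps t1 < t2 < t3 backed by voting powers 33, 40 and 27 out of a total
+// of 100, half the total power is 50. Walking the timestamps in ascending order,
+// the first weight (33) is subtracted, leaving 17; since 17 <= 40, t2 is the
+// weighted median.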
func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { - weightedTimes := make([]*tmtime.WeightedTime, len(commit.Signatures)) + weightedTimes := make([]*weightedTime, len(commit.Signatures)) totalVotingPower := int64(0) for i, commitSig := range commit.Signatures { @@ -298,11 +297,11 @@ func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time // If there's no condition, TestValidateBlockCommit panics; not needed normally. if validator != nil { totalVotingPower += validator.VotingPower - weightedTimes[i] = tmtime.NewWeightedTime(commitSig.Timestamp, validator.VotingPower) + weightedTimes[i] = newWeightedTime(commitSig.Timestamp, validator.VotingPower) } } - return tmtime.WeightedMedian(weightedTimes, totalVotingPower) + return weightedMedian(weightedTimes, totalVotingPower) } //------------------------------------------------------------------------ diff --git a/state/time.go b/state/time.go new file mode 100644 index 000000000..c0770b3af --- /dev/null +++ b/state/time.go @@ -0,0 +1,46 @@ +package state + +import ( + "sort" + "time" +) + +// weightedTime for computing a median. +type weightedTime struct { + Time time.Time + Weight int64 +} + +// newWeightedTime with time and weight. +func newWeightedTime(time time.Time, weight int64) *weightedTime { + return &weightedTime{ + Time: time, + Weight: weight, + } +} + +// weightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. +func weightedMedian(weightedTimes []*weightedTime, totalVotingPower int64) (res time.Time) { + median := totalVotingPower / 2 + + sort.Slice(weightedTimes, func(i, j int) bool { + if weightedTimes[i] == nil { + return false + } + if weightedTimes[j] == nil { + return true + } + return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() + }) + + for _, weightedTime := range weightedTimes { + if weightedTime != nil { + if median <= weightedTime.Weight { + res = weightedTime.Time + break + } + median -= weightedTime.Weight + } + } + return +} diff --git a/libs/time/time_test.go b/state/time_test.go similarity index 50% rename from libs/time/time_test.go rename to state/time_test.go index 1b1a30e50..893ade7ea 100644 --- a/libs/time/time_test.go +++ b/state/time_test.go @@ -1,54 +1,55 @@ -package time +package state import ( "testing" "time" "github.com/stretchr/testify/assert" + tmtime "github.com/tendermint/tendermint/libs/time" ) func TestWeightedMedian(t *testing.T) { - m := make([]*WeightedTime, 3) + m := make([]*weightedTime, 3) - t1 := Now() + t1 := tmtime.Now() t2 := t1.Add(5 * time.Second) t3 := t1.Add(10 * time.Second) - m[2] = NewWeightedTime(t1, 33) // faulty processes - m[0] = NewWeightedTime(t2, 40) // correct processes - m[1] = NewWeightedTime(t3, 27) // correct processes + m[2] = newWeightedTime(t1, 33) // faulty processes + m[0] = newWeightedTime(t2, 40) // correct processes + m[1] = newWeightedTime(t3, 27) // correct processes totalVotingPower := int64(100) - median := WeightedMedian(m, totalVotingPower) + median := weightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && (median.Before(t3) || median.Equal(t3))) - m[1] = NewWeightedTime(t1, 40) // correct processes - m[2] = NewWeightedTime(t2, 27) // correct processes - m[0] = NewWeightedTime(t3, 33) // faulty processes + m[1] = newWeightedTime(t1, 40) // correct processes + m[2] = newWeightedTime(t2, 27) // 
correct processes + m[0] = newWeightedTime(t3, 33) // faulty processes totalVotingPower = int64(100) - median = WeightedMedian(m, totalVotingPower) + median = weightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && (median.Before(t2) || median.Equal(t2))) - m = make([]*WeightedTime, 8) + m = make([]*weightedTime, 8) t4 := t1.Add(15 * time.Second) t5 := t1.Add(60 * time.Second) - m[3] = NewWeightedTime(t1, 10) // correct processes - m[1] = NewWeightedTime(t2, 10) // correct processes - m[5] = NewWeightedTime(t2, 10) // correct processes - m[4] = NewWeightedTime(t3, 23) // faulty processes - m[0] = NewWeightedTime(t4, 20) // correct processes - m[7] = NewWeightedTime(t5, 10) // faulty processes + m[3] = newWeightedTime(t1, 10) // correct processes + m[1] = newWeightedTime(t2, 10) // correct processes + m[5] = newWeightedTime(t2, 10) // correct processes + m[4] = newWeightedTime(t3, 23) // faulty processes + m[0] = newWeightedTime(t4, 20) // correct processes + m[7] = newWeightedTime(t5, 10) // faulty processes totalVotingPower = int64(83) - median = WeightedMedian(m, totalVotingPower) + median = weightedMedian(m, totalVotingPower) assert.Equal(t, t3, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 02055a38a..d123c2f4e 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -107,11 +107,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er switch opt["p2p"].(P2PMode) { case NewP2PMode: - manifest.DisableLegacyP2P = true + manifest.UseLegacyP2P = true case LegacyP2PMode: - manifest.DisableLegacyP2P = false + manifest.UseLegacyP2P = false case HybridP2PMode: - manifest.DisableLegacyP2P = false + manifest.UseLegacyP2P = true p2pNodeFactor = 2 default: return manifest, fmt.Errorf("unknown p2p mode %s", opt["p2p"]) @@ -138,9 +138,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false) if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P + node.UseLegacyP2P = manifest.UseLegacyP2P } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P + node.UseLegacyP2P = !manifest.UseLegacyP2P } manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node @@ -162,9 +162,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P + node.UseLegacyP2P = manifest.UseLegacyP2P } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P + node.UseLegacyP2P = !manifest.UseLegacyP2P } manifest.Nodes[name] = node @@ -198,9 +198,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false) if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P + node.UseLegacyP2P = manifest.UseLegacyP2P } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P + node.UseLegacyP2P = !manifest.UseLegacyP2P } manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } diff --git a/test/e2e/pkg/manifest.go 
b/test/e2e/pkg/manifest.go index 28c61161b..81f64a854 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -59,8 +59,8 @@ type Manifest struct { // by individual nodes. LogLevel string `toml:"log_level"` - // DisableLegacyP2P enables use of the new p2p layer for all nodes in a test. - DisableLegacyP2P bool `toml:"disable_legacy_p2p"` + // UseLegacyP2P uses the legacy p2p layer for all nodes in a test. + UseLegacyP2P bool `toml:"use_legacy_p2p"` // QueueType describes the type of queue that the system uses internally QueueType string `toml:"queue_type"` @@ -148,8 +148,8 @@ type ManifestNode struct { // level. LogLevel string `toml:"log_level"` - // UseNewP2P enables use of the new p2p layer for this node. - DisableLegacyP2P bool `toml:"disable_legacy_p2p"` + // UseLegacyP2P enables use of the legacy p2p layer for this node. + UseLegacyP2P bool `toml:"use_legacy_p2p"` } // Save saves the testnet manifest to a file. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 03e9bf4cd..265a413a7 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -96,7 +96,7 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation LogLevel string - DisableLegacyP2P bool + UseLegacyP2P bool QueueType string } @@ -181,7 +181,7 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - DisableLegacyP2P: manifest.DisableLegacyP2P || nodeManifest.DisableLegacyP2P, + UseLegacyP2P: manifest.UseLegacyP2P && nodeManifest.UseLegacyP2P, } if node.StartAt == testnet.InitialHeight { @@ -426,16 +426,6 @@ func (t Testnet) ArchiveNodes() []*Node { return nodes } -// RandomNode returns a random non-seed node. -func (t Testnet) RandomNode() *Node { - for { - node := t.Nodes[rand.Intn(len(t.Nodes))] - if node.Mode != ModeSeed { - return node - } - } -} - // IPv6 returns true if the testnet is an IPv6 network. func (t Testnet) IPv6() bool { return t.IP.IP.To4() == nil diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 6a246dcb5..30e8d9f0a 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -3,6 +3,7 @@ package main import ( "bytes" "context" + "errors" "fmt" "io/ioutil" "math/rand" @@ -29,7 +30,21 @@ const lightClientEvidenceRatio = 4 // DuplicateVoteEvidence. func InjectEvidence(testnet *e2e.Testnet, amount int) error { // select a random node - targetNode := testnet.RandomNode() + var targetNode *e2e.Node + + for i := 0; i < len(testnet.Nodes)-1; i++ { + targetNode = testnet.Nodes[rand.Intn(len(testnet.Nodes))] // nolint: gosec + if targetNode.Mode == e2e.ModeSeed { + targetNode = nil + continue + } + + break + } + + if targetNode == nil { + return errors.New("could not find node to inject evidence into") + } logger.Info(fmt.Sprintf("Injecting evidence through %v (amount: %d)...", targetNode.Name, amount)) diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 518e32564..b57c96ddf 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -1,6 +1,7 @@ package main import ( + "container/ring" "context" "crypto/rand" "errors" @@ -93,34 +94,64 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, siz // loadProcess processes transactions func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) { - // Each worker gets its own client to each node, which allows for some - // concurrency while still bounding it. 
-	clients := map[string]*rpchttp.HTTP{}
+	// Each worker gets its own client to each usable node, which
+	// allows for some concurrency while still bounding it.
+	clients := make([]*rpchttp.HTTP, 0, len(testnet.Nodes))
 
-	var err error
-	for tx := range chTx {
-		node := testnet.RandomNode()
-
-		client, ok := clients[node.Name]
-		if !ok {
-			client, err = node.Client()
-			if err != nil {
-				continue
-			}
-
-			// check that the node is up
-			_, err = client.Health(ctx)
-			if err != nil {
-				continue
-			}
-
-			clients[node.Name] = client
-		}
-
-		if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
+	for idx := range testnet.Nodes {
+		// Construct a list of usable nodes for creating the
+		// load. Don't send load through seed nodes because
+		// they do not provide the RPC endpoints required to
+		// broadcast transactions.
+		if testnet.Nodes[idx].Mode == e2e.ModeSeed {
 			continue
 		}
-		chSuccess <- tx
+		client, err := testnet.Nodes[idx].Client()
+		if err != nil {
+			continue
+		}
+
+		clients = append(clients, client)
+	}
+
+	if len(clients) == 0 {
+		panic("no clients to process load")
+	}
+
+	// Put the clients in a ring so they can be used in a
+	// round-robin fashion.
+	clientRing := ring.New(len(clients))
+	for idx := range clients {
+		clientRing.Value = clients[idx]
+		clientRing = clientRing.Next()
+	}
+
+	var err error
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case tx := <-chTx:
+			clientRing = clientRing.Next()
+			client := clientRing.Value.(*rpchttp.HTTP)
+
+			if _, err := client.Health(ctx); err != nil {
+				continue
+			}
+
+			if _, err = client.BroadcastTxSync(ctx, tx); err != nil {
+				continue
+			}
+
+			select {
+			case chSuccess <- tx:
+				continue
+			case <-ctx.Done():
+				return
+			}
+
+		}
 	}
 }
diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go
index fda261b43..1a02a7872 100644
--- a/test/e2e/runner/setup.go
+++ b/test/e2e/runner/setup.go
@@ -238,7 +238,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
 	cfg.RPC.PprofListenAddress = ":6060"
 	cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false))
 	cfg.P2P.AddrBookStrict = false
-	cfg.P2P.DisableLegacy = node.DisableLegacyP2P
+	cfg.P2P.UseLegacy = node.UseLegacyP2P
 	cfg.P2P.QueueType = node.QueueType
 	cfg.DBBackend = node.Database
 	cfg.StateSync.DiscoveryTime = 5 * time.Second
@@ -345,17 +345,17 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
 
 // MakeAppConfig generates an ABCI application config for a node.
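+// For example, the generated application configuration for a node might contain
+// entries such as the following (values here are illustrative only):
+//
+//	chain_id = "ci"
+//	listen = "unix:///var/run/app.sock"
+//	protocol = "socket"
+//	use_legacy_p2p = true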
func MakeAppConfig(node *e2e.Node) ([]byte, error) { cfg := map[string]interface{}{ - "chain_id": node.Testnet.Name, - "dir": "data/app", - "listen": AppAddressUNIX, - "mode": node.Mode, - "proxy_port": node.ProxyPort, - "protocol": "socket", - "persist_interval": node.PersistInterval, - "snapshot_interval": node.SnapshotInterval, - "retain_blocks": node.RetainBlocks, - "key_type": node.PrivvalKey.Type(), - "disable_legacy_p2p": node.DisableLegacyP2P, + "chain_id": node.Testnet.Name, + "dir": "data/app", + "listen": AppAddressUNIX, + "mode": node.Mode, + "proxy_port": node.ProxyPort, + "protocol": "socket", + "persist_interval": node.PersistInterval, + "snapshot_interval": node.SnapshotInterval, + "retain_blocks": node.RetainBlocks, + "key_type": node.PrivvalKey.Type(), + "use_legacy_p2p": node.UseLegacyP2P, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index b42d2ae35..c3152cb68 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -9,6 +9,9 @@ import ( ) func Start(testnet *e2e.Testnet) error { + if len(testnet.Nodes) == 0 { + return fmt.Errorf("no nodes in testnet") + } // Nodes are already sorted by name. Sort them by name then startAt, // which gives the overall order startAt, mode, name. @@ -25,12 +28,11 @@ func Start(testnet *e2e.Testnet) error { } return false }) + sort.SliceStable(nodeQueue, func(i, j int) bool { return nodeQueue[i].StartAt < nodeQueue[j].StartAt }) - if len(nodeQueue) == 0 { - return fmt.Errorf("no nodes in testnet") - } + if nodeQueue[0].StartAt > 0 { return fmt.Errorf("no initial nodes in testnet") } @@ -49,9 +51,15 @@ func Start(testnet *e2e.Testnet) error { logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) } + networkHeight := testnet.InitialHeight + // Wait for initial height - logger.Info(fmt.Sprintf("Waiting for initial height %v...", testnet.InitialHeight)) - block, blockID, err := waitForHeight(testnet, testnet.InitialHeight) + logger.Info("Waiting for initial height", + "height", networkHeight, + "nodes", len(testnet.Nodes)-len(nodeQueue), + "pending", len(nodeQueue)) + + block, blockID, err := waitForHeight(testnet, networkHeight) if err != nil { return err } @@ -66,12 +74,28 @@ func Start(testnet *e2e.Testnet) error { } } - // Start up remaining nodes for _, node := range nodeQueue { - logger.Info(fmt.Sprintf("Starting node %v at height %v...", node.Name, node.StartAt)) - if _, _, err := waitForHeight(testnet, node.StartAt); err != nil { - return err + if node.StartAt > networkHeight { + // if we're starting a node that's ahead of + // the last known height of the network, then + // we should make sure that the rest of the + // network has reached at least the height + // that this node will start at before we + // start the node. 
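+			// For example, if the network is currently at
+			// height 10 and this node is configured to start
+			// at height 1000, wait for the rest of the network
+			// to reach height 1000 before bringing it up.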
+ + networkHeight = node.StartAt + + logger.Info("Waiting for network to advance before starting catch up node", + "node", node.Name, + "height", networkHeight) + + if _, _, err := waitForHeight(testnet, networkHeight); err != nil { + return err + } } + + logger.Info("Starting catch up node", "node", node.Name, "height", node.StartAt) + if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { return err } diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 1ca43fa05..8d331aff9 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -32,11 +32,12 @@ func TestNet_Peers(t *testing.T) { seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself } for _, peerInfo := range netInfo.Peers { - peer := node.Testnet.LookupNode(peerInfo.NodeInfo.Moniker) - require.NotNil(t, peer, "unknown node %v", peerInfo.NodeInfo.Moniker) - require.Equal(t, peer.IP.String(), peerInfo.RemoteIP, - "unexpected IP address for peer %v", peer.Name) - seen[peerInfo.NodeInfo.Moniker] = true + id := peerInfo.ID + peer := node.Testnet.LookupNode(string(id)) + require.NotNil(t, peer, "unknown node %v", id) + require.Contains(t, peerInfo.URL, peer.IP.String(), + "unexpected IP address for peer %v", id) + seen[string(id)] = true } for name := range seen {