diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c823bbaae..6dd968c20 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -30,12 +30,6 @@ updates: - T:dependencies - S:automerge - - package-ecosystem: npm - directory: "/docs" - schedule: - interval: weekly - open-pull-requests-limit: 10 - ################################### ## ## Update All Go Dependencies diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 944ba13ef..8d89a20da 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -30,8 +30,7 @@ jobs: - name: Capture git repo info id: git-info run: | - echo "::set-output name=branch::`git branch --show-current`" - echo "::set-output name=commit::`git rev-parse HEAD`" + echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT - name: Build working-directory: test/e2e @@ -49,7 +48,6 @@ jobs: outputs: git-branch: ${{ steps.git-info.outputs.branch }} - git-commit: ${{ steps.git-info.outputs.commit }} e2e-nightly-fail: needs: e2e-nightly-test @@ -63,7 +61,7 @@ jobs: SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}" + COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" with: payload: | { @@ -72,7 +70,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
} } ] diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml index 769a316b2..ebcc4a3fb 100644 --- a/.github/workflows/e2e-nightly-37x.yml +++ b/.github/workflows/e2e-nightly-37x.yml @@ -30,8 +30,7 @@ jobs: - name: Capture git repo info id: git-info run: | - echo "::set-output name=branch::`git branch --show-current`" - echo "::set-output name=commit::`git rev-parse HEAD`" + echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT - name: Build working-directory: test/e2e @@ -49,7 +48,6 @@ jobs: outputs: git-branch: ${{ steps.git-info.outputs.branch }} - git-commit: ${{ steps.git-info.outputs.commit }} e2e-nightly-fail: needs: e2e-nightly-test @@ -63,7 +61,7 @@ jobs: SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}" + COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}" with: payload: | { @@ -72,7 +70,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." 
} } ] diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml index dd8b7abbb..7f734367d 100644 --- a/.github/workflows/e2e-nightly-main.yml +++ b/.github/workflows/e2e-nightly-main.yml @@ -52,7 +52,7 @@ jobs: SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK BRANCH: ${{ github.ref_name }} RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}" + COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}" with: payload: | { @@ -61,7 +61,7 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure." + "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure." } } ] diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 0209501a1..e6c71db08 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -64,7 +64,7 @@ jobs: - name: Set crashers count working-directory: test/fuzz - run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" + run: echo "count=$(find . 
-type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" >> $GITHUB_OUTPUT id: set-crashers-count outputs: diff --git a/.github/workflows/docker.yml b/.github/workflows/tendermint-docker.yml similarity index 97% rename from .github/workflows/docker.yml rename to .github/workflows/tendermint-docker.yml index e1f38d1a6..721a2717b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/tendermint-docker.yml @@ -33,7 +33,7 @@ jobs: if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}" fi - echo ::set-output name=tags::${TAGS} + echo "tags=${TAGS}" >> $GITHUB_OUTPUT - name: Set up QEMU uses: docker/setup-qemu-action@master diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml new file mode 100644 index 000000000..310e80e38 --- /dev/null +++ b/.github/workflows/testapp-docker.yml @@ -0,0 +1,60 @@ +name: Docker E2E Node +# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags +# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node +on: + push: + branches: + - main + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 + - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. 
v0.37.0-rc1, v0.38.0-rc10 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=tendermint/e2e-node + VERSION=noop + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then + VERSION=latest + fi + fi + TAGS="${DOCKER_IMAGE}:${VERSION}" + if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}" + fi + echo "tags=${TAGS}" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@master + with: + platforms: all + + - name: Set up Docker Build + uses: docker/setup-buildx-action@v2.2.1 + + - name: Login to DockerHub + if: ${{ github.event_name != 'pull_request' }} + uses: docker/login-action@v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Publish to Docker Hub + uses: docker/build-push-action@v3.2.0 + with: + context: . + file: ./test/e2e/docker/Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'beep_boop' }} + tags: ${{ steps.prep.outputs.tags }} diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 25c97aff7..5f0e93dd0 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -12,6 +12,8 @@ - Go API - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters) + - [rpc] \#9655 Remove global environment and replace with constructor. (@williambanfield,@tychoish) + - [node] \#9655 Move DBContext and DBProvider from the node package to the config package. (@williambanfield,@tychoish) - Blockchain Protocol @@ -22,9 +24,13 @@ - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. 
(@JayT106) - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters) labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing + - [inspect] \#9655 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) ### FEATURES +- [proxy] \#9830 Introduce `NewUnsyncLocalClientCreator`, which allows local + ABCI clients to have the same concurrency model as remote clients (i.e. one + mutex per client "connection", for each of the four ABCI "connections"). - [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to the addressbook upon start up (@cmwaters) diff --git a/abci/client/unsync_local_client.go b/abci/client/unsync_local_client.go deleted file mode 100644 index 3198c1720..000000000 --- a/abci/client/unsync_local_client.go +++ /dev/null @@ -1,263 +0,0 @@ -package abcicli - -import ( - "sync" - - types "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/service" -) - -type unsyncLocalClient struct { - service.BaseService - - types.Application - - // This mutex is exclusively used to protect the callback. - mtx sync.RWMutex - Callback -} - -var _ Client = (*unsyncLocalClient)(nil) - -// NewUnsyncLocalClient creates an unsynchronized local client, which will be -// directly calling the methods of the given app. -// -// Unlike NewLocalClient, it does not hold a mutex around the application, so -// it is up to the application to manage its synchronization properly. -func NewUnsyncLocalClient(app types.Application) Client { - cli := &unsyncLocalClient{ - Application: app, - } - cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli) - return cli -} - -func (app *unsyncLocalClient) SetResponseCallback(cb Callback) { - app.mtx.Lock() - defer app.mtx.Unlock() - app.Callback = cb -} - -// TODO: change types.Application to include Error()? 
-func (app *unsyncLocalClient) Error() error { - return nil -} - -func (app *unsyncLocalClient) FlushAsync() *ReqRes { - // Do nothing - return newLocalReqRes(types.ToRequestFlush(), nil) -} - -func (app *unsyncLocalClient) EchoAsync(msg string) *ReqRes { - return app.callback( - types.ToRequestEcho(msg), - types.ToResponseEcho(msg), - ) -} - -func (app *unsyncLocalClient) InfoAsync(req types.RequestInfo) *ReqRes { - res := app.Application.Info(req) - return app.callback( - types.ToRequestInfo(req), - types.ToResponseInfo(res), - ) -} - -func (app *unsyncLocalClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { - res := app.Application.DeliverTx(params) - return app.callback( - types.ToRequestDeliverTx(params), - types.ToResponseDeliverTx(res), - ) -} - -func (app *unsyncLocalClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes { - res := app.Application.CheckTx(req) - return app.callback( - types.ToRequestCheckTx(req), - types.ToResponseCheckTx(res), - ) -} - -func (app *unsyncLocalClient) QueryAsync(req types.RequestQuery) *ReqRes { - res := app.Application.Query(req) - return app.callback( - types.ToRequestQuery(req), - types.ToResponseQuery(res), - ) -} - -func (app *unsyncLocalClient) CommitAsync() *ReqRes { - res := app.Application.Commit() - return app.callback( - types.ToRequestCommit(), - types.ToResponseCommit(res), - ) -} - -func (app *unsyncLocalClient) InitChainAsync(req types.RequestInitChain) *ReqRes { - res := app.Application.InitChain(req) - return app.callback( - types.ToRequestInitChain(req), - types.ToResponseInitChain(res), - ) -} - -func (app *unsyncLocalClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { - res := app.Application.BeginBlock(req) - return app.callback( - types.ToRequestBeginBlock(req), - types.ToResponseBeginBlock(res), - ) -} - -func (app *unsyncLocalClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { - res := app.Application.EndBlock(req) - return app.callback( - types.ToRequestEndBlock(req), - 
types.ToResponseEndBlock(res), - ) -} - -func (app *unsyncLocalClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { - res := app.Application.ListSnapshots(req) - return app.callback( - types.ToRequestListSnapshots(req), - types.ToResponseListSnapshots(res), - ) -} - -func (app *unsyncLocalClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { - res := app.Application.OfferSnapshot(req) - return app.callback( - types.ToRequestOfferSnapshot(req), - types.ToResponseOfferSnapshot(res), - ) -} - -func (app *unsyncLocalClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { - res := app.Application.LoadSnapshotChunk(req) - return app.callback( - types.ToRequestLoadSnapshotChunk(req), - types.ToResponseLoadSnapshotChunk(res), - ) -} - -func (app *unsyncLocalClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { - res := app.Application.ApplySnapshotChunk(req) - return app.callback( - types.ToRequestApplySnapshotChunk(req), - types.ToResponseApplySnapshotChunk(res), - ) -} - -func (app *unsyncLocalClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes { - res := app.Application.PrepareProposal(req) - return app.callback( - types.ToRequestPrepareProposal(req), - types.ToResponsePrepareProposal(res), - ) -} - -func (app *unsyncLocalClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes { - res := app.Application.ProcessProposal(req) - return app.callback( - types.ToRequestProcessProposal(req), - types.ToResponseProcessProposal(res), - ) -} - -//------------------------------------------------------- - -func (app *unsyncLocalClient) FlushSync() error { - return nil -} - -func (app *unsyncLocalClient) EchoSync(msg string) (*types.ResponseEcho, error) { - return &types.ResponseEcho{Message: msg}, nil -} - -func (app *unsyncLocalClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - res := app.Application.Info(req) - return &res, nil -} - -func (app 
*unsyncLocalClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - res := app.Application.DeliverTx(req) - return &res, nil -} - -func (app *unsyncLocalClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { - res := app.Application.CheckTx(req) - return &res, nil -} - -func (app *unsyncLocalClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { - res := app.Application.Query(req) - return &res, nil -} - -func (app *unsyncLocalClient) CommitSync() (*types.ResponseCommit, error) { - res := app.Application.Commit() - return &res, nil -} - -func (app *unsyncLocalClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { - res := app.Application.InitChain(req) - return &res, nil -} - -func (app *unsyncLocalClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - res := app.Application.BeginBlock(req) - return &res, nil -} - -func (app *unsyncLocalClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { - res := app.Application.EndBlock(req) - return &res, nil -} - -func (app *unsyncLocalClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - res := app.Application.ListSnapshots(req) - return &res, nil -} - -func (app *unsyncLocalClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - res := app.Application.OfferSnapshot(req) - return &res, nil -} - -func (app *unsyncLocalClient) LoadSnapshotChunkSync( - req types.RequestLoadSnapshotChunk, -) (*types.ResponseLoadSnapshotChunk, error) { - res := app.Application.LoadSnapshotChunk(req) - return &res, nil -} - -func (app *unsyncLocalClient) ApplySnapshotChunkSync( - req types.RequestApplySnapshotChunk, -) (*types.ResponseApplySnapshotChunk, error) { - res := app.Application.ApplySnapshotChunk(req) - return &res, nil -} - -func (app *unsyncLocalClient) PrepareProposalSync(req 
types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - res := app.Application.PrepareProposal(req) - return &res, nil -} - -func (app *unsyncLocalClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - res := app.Application.ProcessProposal(req) - return &res, nil -} - -//------------------------------------------------------- - -func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes { - app.mtx.RLock() - defer app.mtx.RUnlock() - app.Callback(req, res) - rr := newLocalReqRes(req, res) - rr.callbackInvoked = true - return rr -} diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 4a4201943..ab0d52156 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -460,11 +460,14 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error { fmt.Println("Available commands:") fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short) - fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short) fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short) - fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short) - fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short) fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short) + fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short) + fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short) + fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short) + fmt.Printf("%s: %s\n", prepareProposalCmd.Use, prepareProposalCmd.Short) + fmt.Printf("%s: %s\n", processProposalCmd.Use, processProposalCmd.Short) + fmt.Println("Use \"[command] --help\" for more information about a command.") return nil diff --git a/abci/example/code/code.go b/abci/example/code/code.go index 6d011ed9d..660bac987 100644 --- a/abci/example/code/code.go +++ b/abci/example/code/code.go @@ -8,4 +8,5 @@ const ( CodeTypeUnauthorized uint32 = 3 CodeTypeUnknownError uint32 = 4 CodeTypeExecuted uint32 = 5 + CodeTypeRejected uint32 = 6 ) 
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 3188c9425..cae5f79d9 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -122,6 +122,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli } func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { + if len(req.Tx) == 0 { + return types.ResponseCheckTx{Code: code.CodeTypeRejected} + } + if req.Type == types.CheckTxType_Recheck { if _, ok := app.txToRemove[string(req.Tx)]; ok { return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1} diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 5d91f3699..77d6e933f 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -70,6 +70,24 @@ func TestKVStoreKV(t *testing.T) { testKVStore(t, kvstore, tx, key, value) } +func TestPersistentKVStoreEmptyTX(t *testing.T) { + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO + if err != nil { + t.Fatal(err) + } + kvstore := NewPersistentKVStoreApplication(dir) + tx := []byte("") + reqCheck := types.RequestCheckTx{Tx: tx} + resCheck := kvstore.CheckTx(reqCheck) + require.Equal(t, resCheck.Code, code.CodeTypeRejected) + + txs := make([][]byte, 0, 4) + txs = append(txs, []byte("key=value"), []byte("key"), []byte(""), []byte("kee=value")) + reqPrepare := types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024} + resPrepare := kvstore.PrepareProposal(reqPrepare) + require.Equal(t, len(reqPrepare.Txs), len(resPrepare.Txs)+1, "Empty transaction not properly removed") +} + func TestPersistentKVStoreKV(t *testing.T) { dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 500d4c5c9..8aa255b75 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ 
b/abci/example/kvstore/persistent_kvstore.go @@ -324,11 +324,15 @@ func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.Response } // substPrepareTx substitutes all the transactions prefixed with 'prepare' in the -// proposal for transactions with the prefix stripped. +// proposal for transactions with the prefix stripped, while discarding invalid empty transactions. func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte { txs := make([][]byte, 0, len(blockData)) var totalBytes int64 for _, tx := range blockData { + if len(tx) == 0 { + continue + } + txMod := tx if isPrepareTx(tx) { txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1) diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go new file mode 100644 index 000000000..9e473ec00 --- /dev/null +++ b/cmd/tendermint/commands/inspect.go @@ -0,0 +1,87 @@ +package commands + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer/block" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +// InspectCmd is the command for starting an inspect server. +var InspectCmd = &cobra.Command{ + Use: "inspect", + Short: "Run an inspect server for investigating Tendermint state", + Long: ` + inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging + issues with Tendermint. + + When the Tendermint consensus engine detects inconsistent state, it will crash the + Tendermint process. Tendermint will not start up while in this inconsistent state. + The inspect command can be used to query the block and state store using Tendermint + RPC calls to debug issues of inconsistent state. 
+ `, + + RunE: runInspect, +} + +func init() { + InspectCmd.Flags(). + String("rpc.laddr", + config.RPC.ListenAddress, "RPC listener address. Port required") + InspectCmd.Flags(). + String("db-backend", + config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + InspectCmd.Flags(). + String("db-dir", config.DBPath, "database directory") +} + +func runInspect(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) + go func() { + <-c + cancel() + }() + + blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config}) + if err != nil { + return err + } + blockStore := store.NewBlockStore(blockStoreDB) + defer blockStore.Close() + + stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config}) + if err != nil { + return err + } + stateStore := state.NewStore(stateDB, state.StoreOptions{DiscardABCIResponses: false}) + defer stateStore.Close() + + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return err + } + txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID) + if err != nil { + return err + } + ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger) + + logger.Info("starting inspect server") + if err := ins.Run(ctx); err != nil { + return err + } + return nil +} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 59e7a1b12..47cad15ed 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -30,6 +30,7 @@ func main() { cmd.VersionCmd, cmd.RollbackStateCmd, cmd.CompactGoLevelDBCmd, + cmd.InspectCmd, debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), ) diff --git a/config/db.go b/config/db.go new file mode 100644 index 000000000..bbc286944 --- /dev/null +++ b/config/db.go @@ -0,0 +1,30 @@ +package config
+ +import ( + "context" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" +) + +// ServiceProvider takes a config and a logger and returns a ready to go Node. +type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error) + +// DBContext specifies config information for loading a new DB. +type DBContext struct { + ID string + Config *Config +} + +// DBProvider takes a DBContext and returns an instantiated DB. +type DBProvider func(*DBContext) (dbm.DB, error) + +// DefaultDBProvider returns a database using the DBBackend and DBDir +// specified in the Config. +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +} diff --git a/consensus/replay.go b/consensus/replay.go index 83569aff2..edac91425 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -54,7 +54,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } - case <-newStepSub.Cancelled(): + case <-newStepSub.Canceled(): return fmt.Errorf("failed to read off newStepSub.Out(). 
newStepSub was canceled") case <-ticker: return fmt.Errorf("failed to read off newStepSub.Out()") diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 44bbe09bf..f4c4e14e8 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -97,7 +97,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi require.NoError(t, err) select { case <-newBlockSub.Out(): - case <-newBlockSub.Cancelled(): + case <-newBlockSub.Canceled(): t.Fatal("newBlockSub was canceled") case <-time.After(120 * time.Second): t.Fatal("Timed out waiting for new block (see trace above)") @@ -1198,6 +1198,7 @@ func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int } func (bs *mockBlockStore) DeleteLatestBlock() error { return nil } +func (bs *mockBlockStore) Close() error { return nil } //--------------------------------------- // Test handshake/init chain diff --git a/docs/pre.sh b/docs/pre.sh index 37193d265..9a671789a 100755 --- a/docs/pre.sh +++ b/docs/pre.sh @@ -1,3 +1,4 @@ #!/bin/bash -cp -a ../rpc/openapi/ .vuepress/public/rpc/ +mkdir -p .vuepress/public/rpc/ +cp -a ../rpc/openapi/* .vuepress/public/rpc/ diff --git a/docs/rfc/README.md b/docs/rfc/README.md index 83ff9551d..bd3e2e02b 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -62,5 +62,6 @@ sections. 
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md) - [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md) - [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md) +- [RFC-027: P2P Message Bandwidth Report](./rfc-027-p2p-message-bandwidth-report.md) diff --git a/docs/rfc/images/receive-rate-all.png b/docs/rfc/images/receive-rate-all.png new file mode 100644 index 000000000..b28a83f61 Binary files /dev/null and b/docs/rfc/images/receive-rate-all.png differ diff --git a/docs/rfc/images/send-rate-all.png b/docs/rfc/images/send-rate-all.png new file mode 100644 index 000000000..803aca21f Binary files /dev/null and b/docs/rfc/images/send-rate-all.png differ diff --git a/docs/rfc/images/top-3-percent-receive.png b/docs/rfc/images/top-3-percent-receive.png new file mode 100644 index 000000000..39c5a2d06 Binary files /dev/null and b/docs/rfc/images/top-3-percent-receive.png differ diff --git a/docs/rfc/images/top-3-percent-send.png b/docs/rfc/images/top-3-percent-send.png new file mode 100644 index 000000000..61f7f6e6a Binary files /dev/null and b/docs/rfc/images/top-3-percent-send.png differ diff --git a/docs/rfc/rfc-027-p2p-message-bandwidth-report.md b/docs/rfc/rfc-027-p2p-message-bandwidth-report.md new file mode 100644 index 000000000..eaa99cdef --- /dev/null +++ b/docs/rfc/rfc-027-p2p-message-bandwidth-report.md @@ -0,0 +1,287 @@ +# RFC 27: P2P Message Bandwidth Report + +## Changelog + +- Nov 7, 2022: initial draft (@williambanfield) +- Nov 15, 2022: draft completed (@williambanfield) + +## Abstract + +Node operators and application developers complain that Tendermint nodes consume +large amounts of network bandwidth. This RFC catalogues the major sources of bandwidth +consumption within Tendermint and suggests modifications to Tendermint that may reduce +bandwidth consumption for nodes.
+ +## Background +Multiple teams running validators in production report that the validator +consumes a lot of bandwidth. They report that operators running on a network +with hundreds of validators consumes multiple terabytes of bandwidth per day. +Prometheus data collected from a validator node running on the Osmosis chain +shows that Tendermint sends and receives large amounts of data to peers. In the +nearly three hours of observation, Tendermint sent nearly 42 gigabytes and +received about 26 gigabytes, for an estimated 366 gigabytes sent daily and 208 +gigabytes received daily. While this is shy of the reported terabytes number, +operators running multiple nodes for a 'sentry' pattern could easily send and +receive a terabyte of data. + +Sending and receiving large amounts of data has a cost for node operators. Most +cloud platforms charge for network traffic egress. Google Cloud charges between +[$.05 to $.12 per gigabyte of egress traffic][gcloud-pricing], and ingress is +free. Hetzner [charges 1€ per TB used over the 10-20TB base bandwidth per +month][hetzner-pricing], which will be easily hit if multiple terabytes are +sent and received per day. Using the values collected from the validator on +Osmosis, a single node on Google cloud may cost $18 to $44 a day running on +Google cloud. On Hetzner, the estimated 18TB a month of both sending and +receiving may cost between 0 and 10 Euro a month per node. + +## Discussion + +### Overview of Major Bandwidth Usage + +To determine which components of Tendermint were consuming the most bandwidth, +I gathered prometheus metrics from the [Blockpane][blockpane] validator running +on the Osmosis network for several hours. The data reveal that three message +types account for 98% of the total bandwidth consumed. These message types are +as follows: + + +1. [consensus.BlockPart][block-part-message] +2. [mempool.Txs][mempool-txs-message] +3. 
[consensus.Vote][vote-message] + + +The image below of p2p data collected from the Blockpane validator illustrates +the total bandwidth consumption of these three message types. + + +#### Send: + +##### Top 3 Percent: + +![](./images/top-3-percent-send.png) + +##### Rate For All Messages: + +![](./images/send-rate-all.png) + +#### Receive: + +##### Top 3 Percent: + +![](./images/top-3-percent-receive.png) + +##### Rate For All Messages: + +![](./images/receive-rate-all.png) + +### Investigation of Message Usage + +This section discusses the usage of each of the three highest consumption messages. +#### BlockPart Transmission + +Sending `BlockPart` messages consumes the most bandwidth out of all p2p +message types as observed in the Blockpane Osmosis validator. In the almost 3 +hour observation, the validator sent about 20 gigabytes of `BlockPart` +messages. + +A block is proposed each round of Tendermint consensus. The paper does not +define a specific way that the block is to be transmitted, just that all +participants will receive it via a gossip network. + +The Go implementation of Tendermint transmits the block in 'parts'. It +serializes the block to wire-format proto and splits this byte representation +into a set of 4 kilobyte arrays and sends these arrays to its peers, each in a +separate message. + +The logic for sending `BlockPart` messages resides in the code for the +[consensus.Reactor][gossip-data-routine]. The consensus reactor starts a new +`gossipDataRoutine` for each peer it connects to. This routine repeatedly picks +a part of the block that Tendermint believes the peer does not know about yet +and gossips it to the peer. The set of `BlockParts` that Tendermint considers +its peer as having is only updated in one of four ways: + + + 1. Our peer tells us they have entered a new round [via a `NewRoundStep` +message][new-round-step-message-send].
This message is only sent when a node +moves to a new round or height and only resets the data we collect about a +peer's blockpart state. + 1. [We receive a block part from the peer][block-part-receive]. + 1. [We send][block-part-send-1] [the peer a block part][block-part-send-2]. + 1. Our peer tells us about the parts they have block [via `NewValidBlock` +messages][new-valid-block-message-send]. This message is only sent when the +peer has a quorum of prevotes or precommits for a block. + +Each node receives block parts from all of its peers. The particular block part +to send at any given time is randomly selected from the set of parts that the +peer node is not yet known to have. Given that these are the only times that +Tendermint learns of its peers' block parts, it's very likely that a node has +an incomplete understanding of its peers' block parts and is transmitting block +parts to a peer that the peer has received from some other node. + +Multiple potential mechanisms exist to reduce the number of duplicate block +parts a node receives. One set of mechanisms relies on more frequently +communicating the set of block parts a node needs to its peers. Another +potential mechanism requires a larger overhaul to the way blocks are gossiped +in the network. + +#### Mempool Tx Transmission + +The Tendermint mempool stages transactions that are yet to be committed to the +blockchain and communicates these transactions to its peers. Each message +contains one transaction. Data collected from the Blockpane node running on +Osmosis indicates that the validator sent about 12 gigabytes of `Txs` messages +during the nearly 3 hour observation period. + +The Tendermint mempool starts a new [broadcastTxRoutine][broadcast-tx-routine] +for each peer that it is informed of. The routine sends all transactions that +the mempool is aware of to all peers with few exceptions. 
The only exception is +if the mempool received a transaction from a peer, then it marks it as such and +won't resend to that peer. Otherwise, it retains no information about which +transactions it already sent to a peer. In some cases it may therefore resend +transactions the peer already has. This can occur if the mempool removes a +transaction from the `CList` data structure used to store the list of +transactions while it is about to be sent and if the transaction was the tail +of the `CList` during removal. This will be more likely to occur if a large +number of transactions from the end of the list are removed during `RecheckTx`, +since multiple transactions will become the tail and then be deleted. It is +unclear at the moment how frequently this occurs on production chains. + +Beyond ensuring that transactions are rebroadcast to peers less frequently, +there is not a simple scheme to communicate fewer transactions to peers. Peers +cannot communicate what transactions they need since they do not know which +transactions exist on the network. + +#### Vote Transmission + +Tendermint votes, both prevotes and precommits, are central to Tendermint +consensus and are gossiped by all nodes to all peers during each consensus +round. Data collected from the Blockpane node running on Osmosis indicates that +about 9 gigabytes of `Vote` messages were sent during the nearly 3 hour period +of observation. Examination of the [Vote message][vote-msg] indicates that it +contains 184 bytes of data, with the proto encoding adding a few additional +bytes when transmitting. + +The Tendermint consensus reactor starts a new +[gossipVotesRoutine][gossip-votes-routine] for each peer that it connects to. +The reactor sends all votes to all peers unless it knows that the peer already +has the vote or the reactor learns that the peer is in a different round and +that thus the vote no longer applies. Tendermint learns that a peer has a vote +in one of 4 ways: + + 1. 
Tendermint sent the peer the vote. + 1. Tendermint received the vote from the peer. + 1. The peer [sent a `HasVote` message][apply-has-vote]. This message is broadcast +to all peers [each time validator receives a vote it hasn't seen before +corresponding to its current height and round][publish-event-vote]. + 1. The peer [sent a `VoteSetBits` message][apply-vote-set-bits]. This message is +[sent as a response to a peer that sends a `VoteSetMaj23`][vote-set-bits-send]. + +Given that Tendermint informs all peers of _each_ vote message it receives, all +nodes should be well informed of which votes their peers have. Given that the +vote messages were the third largest consumer of bandwidth in the observation +on Osmosis, it's possible that this system is not currently working correctly. +Further analysis should examine where votes may be being retransmitted. + +### Suggested Improvements to Lower Message Transmission Bandwidth + +#### Gossip Known BlockPart Data + +The `BlockPart` messages, by far, account for the majority of the data sent to +each peer. At the moment, peers do not inform the node of which block parts +they already have. This means that each block part is _very likely_ to be +transmitted many times to each node. This frivolous consumption is even worse +in networks with large blocks. + +The very simple solution to this issue is to copy the technique used in +consensus for informing peers when the node receives a vote. The consensus +reactor can be augmented with a `HasBlockPart` message that is broadcast to +each peer every time the node receives a block part. By informing each peer +every time the node receives a block part, we can drastically reduce the amount +of duplicate data sent to each node. There would be no algorithmic way of +enforcing that a peer accurately reports its block parts, so providing this +message would be a somewhat altruistic action on the part of the node. 
Such a +system [has been proposed in the past][i627] as well, so this is certainly not +totally new ground. + +Measuring the size of duplicately received blockparts before and after this +change would help validate this approach. + +#### Compress Transmitted Data + +Tendermint's data is sent uncompressed on the wire. The messages are not +compressed before sending and the transport performs no compression either. +Some of the information communicated by Tendermint is a poor candidate for +compression: Data such as digital signatures and hashes have high entropy and +therefore do not compress well. However, transactions may contain lots of +information that has less entropy. Compression within Tendermint may be added +at several levels. Compression may be performed at the [Tendermint 'packet' +level][must-wrap-packet] or at the [Tendermint message send +level][message-send]. + +#### Transmit Less Data During Block Gossip + +Block, vote, and mempool gossiping transmit much of same data. The mempool +reactor gossips candidate transactions to each peer. The consensus reactor, +when gossiping the votes, sends vote metadata and the digital signature of that +signs over that metadata. Finally, when a block is proposed, the proposing node +amalgamates the received votes, a set of transaction, and adds a header to +produce the block. This block is then serialized and gossiped as a list of +bytes. However, the data that the block contains, namely the votes and the +transactions were most likely _already transmitted to the nodes on the network_ +via mempool transaction gossip and consensus vote gossip. + +Therefore, block gossip can be updated to transmit a representation of the data +contained in the block that assumes the peers will already have most of this +data. Namely, the block gossip can be updated to only send 1) a list of +transaction hashes and 2) a bit array of votes selected for the block along +with the header and other required block metadata. 
+ +This new proposed method for gossiping block data could accompany a slight +update to the mempool transaction gossip and consensus vote gossip. Since all +of the contents of each block will not be gossiped together, it's possible that +some nodes are missing a proposed transaction or the vote of a validator +indicated in the new block gossip format during block gossip. The mempool and +consensus reactors may therefore be updated to provide a `NeedTxs` and +`NeedVotes` message. Each of these messages would allow a node to request a set +of data from their peers. When a node receives one of these, it will then +transmit the Tx/Votes indicate in the associated message regardless of whether +it believes it has transmitted them to the peer before. The gossip layer will +ensure that each peer eventually receives all of the data in the block. +However, if a transaction is needed immediately by a peer so that it can verify +and execute a block during consensus, a mechanism such as the `NeedTxs` and +`NeedVotes` messages should be added to ensure it receives the messages +quickly. + +The same logic may applied for evidence transmission as well, since all nodes +should receive evidence and therefore do not need to re-transmit it in a block +part. + +A similar idea has been proposed in the past as [Compact Block +Propagation][compact-block-propagation]. 
+ + +## References + +[blockpane]: https://www.mintscan.io/osmosis/validators/osmovaloper1z0sh4s80u99l6y9d3vfy582p8jejeeu6tcucs2 +[block-part-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L44 +[mempool-txs-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/mempool/types.proto#L6 +[vote-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L51 +[gossip-data-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L537 +[block-part-receive]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L324 +[block-part-send-1]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L566 +[block-part-send-2]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L684. 
+[new-valid-block-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L268 +[new-round-step-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L266 +[broadcast-tx-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L197 +[gossip-votes-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L694 +[apply-has-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1429 +[apply-vote-set-bits]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1445 +[publish-event-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/state.go#L2083 +[vote-set-bits-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L306 +[must-wrap-packet]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/conn/connection.go#L889-L918 +[message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/peer.go#L285 +[gcloud-pricing]: https://cloud.google.com/vpc/network-pricing#vpc-pricing +[hetzner-pricing]: https://docs.hetzner.com/robot/general/traffic +[vote-msg]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/types/types.pb.go#L468 +[i627]: https://github.com/tendermint/tendermint/issues/627 +[compact-block-propagation]: https://github.com/tendermint/tendermint/issues/7932 diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index bbd14ea82..6e28b564b 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -18,52 
+18,52 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Tags** | **Description** | -|------------------------------------------|-----------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------| -| `abci_connection_method_timing_seconds` | Histogram | `method`, `type` | Timings for each of the ABCI methods | -| `blocksync_syncing` | Gauge | | Either 0 (not block syncing) or 1 (syncing) | -| `consensus_height` | Gauge | | Height of the chain | -| `consensus_validators` | Gauge | | Number of validators | -| `consensus_validators_power` | Gauge | | Total voting power of all validators | -| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set | -| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator | -| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| `consensus_missing_validators` | Gauge | | Number of validators who did not sign | -| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators | -| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign | -| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators | -| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| `consensus_rounds` | Gauge | | Number of rounds | -| `consensus_num_txs` | Gauge | | Number of transactions | -| `consensus_total_txs` | Gauge | | Total number of transactions committed | -| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer | -| `consensus_latest_block_height` | Gauge | | /status sync\_info number | -| 
`consensus_block_size_bytes` | Gauge | | Block size in bytes | -| `consensus_step_duration` | Histogram | `step` | Histogram of durations for each step in the consensus protocol | -| `consensus_round_duration` | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | -| `consensus_block_gossip_parts_received` | Counter | `matches_current` | Number of block parts received by the node | -| `consensus_quorum_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum | -| `consensus_full_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | -| `consensus_proposal_receive_count` | Counter | `status` | Total number of proposals received by the node since process start | -| `consensus_proposal_create_count` | Counter | | Total number of proposals created by the node since process start | -| `consensus_round_voting_power_percent` | Gauge | `vote_type` | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | -| `consensus_late_votes` | Counter | `vote_type` | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. 
| -| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type | -| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type | -| `p2p_peers` | Gauge | | Number of peers node's connected to | -| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer | -| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer | -| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer | -| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id | -| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer | -| `mempool_size` | Gauge | | Number of uncommitted transactions | -| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes | -| `mempool_failed_txs` | Counter | | Number of failed transactions | -| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool | -| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms | -| `state_consensus_param_updates` | Counter | | Number of consensus parameter updates returned by the application since process start | -| `state_validator_set_updates` | Counter | | Number of validator set updates returned by the application since process start | -| `statesync_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) | +| **Name** | **Type** | **Tags** | **Description** | +|--------------------------------------------|-----------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods | +| blocksync\_syncing | Gauge | | 
Either 0 (not block syncing) or 1 (syncing) | +| consensus\_height | Gauge | | Height of the chain | +| consensus\_validators | Gauge | | Number of validators | +| consensus\_validators\_power | Gauge | | Total voting power of all validators | +| consensus\_validator\_power | Gauge | | Voting power of the node if in the validator set | +| consensus\_validator\_last\_signed\_height | Gauge | | Last height the node signed a block, if the node is a validator | +| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus\_missing\_validators | Gauge | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | | Number of rounds | +| consensus\_num\_txs | Gauge | | Number of transactions | +| consensus\_total\_txs | Gauge | | Total number of transactions committed | +| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | Gauge | | /status sync\_info number | +| consensus\_block\_size\_bytes | Gauge | | Block size in bytes | +| consensus\_step\_duration | Histogram | step | Histogram of durations for each step in the consensus protocol | +| consensus\_round\_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started | +| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node | +| consensus\_quorum\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the 
timestamp of the earliest prevote that achieved a quorum | +| consensus\_full\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted | +| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start | +| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start | +| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round | +| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. | +| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type | +| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type | +| p2p\_peers | Gauge | | Number of peers node's connected to | +| p2p\_peer\_receive\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel received from a given peer | +| p2p\_peer\_send\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel sent to a given peer | +| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer | +| p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id | +| p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer | +| mempool\_size | Gauge | | Number of uncommitted transactions | +| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes | +| mempool\_failed\_txs | Counter | | Number of failed transactions | +| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool | +| 
state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms | +| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start | +| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start | +| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) | ## Useful queries diff --git a/docs/tools/debugging.md b/docs/tools/debugging.md index 17fa0ec11..2c9998fbe 100644 --- a/docs/tools/debugging.md +++ b/docs/tools/debugging.md @@ -55,3 +55,47 @@ given destination directory. Each archive will contain: Note: goroutine.out and heap.out will only be written if a profile address is provided and is operational. This command is blocking and will log any error. + +## Tendermint Inspect + +Tendermint includes an `inspect` command for querying Tendermint's state store and block +store over Tendermint RPC. + +When the Tendermint consensus engine detects inconsistent state, it will crash the +entire Tendermint process. +While in this inconsistent state, a node running Tendermint's consensus engine will not start up. +The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store +and state store. +`inspect` allows operators to query a read-only view of the stage. +`inspect` does not run the consensus engine at all and can therefore be used to debug +processes that have crashed due to inconsistent state. + +### Running inspect + +Start up the `inspect` tool on the machine where Tendermint crashed using: +```bash +tendermint inspect --home= +``` + +`inspect` will use the data directory specified in your Tendermint configuration file. +`inspect` will also run the RPC server at the address specified in your Tendermint configuration file. 
+ +### Using inspect + +With the `inspect` server running, you can access RPC endpoints that are critically important +for debugging. +Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoint +will return useful information about the Tendermint consensus state. + +To start the `inspect` process, run +```bash +tendermint inspect +``` + +### RPC endpoints + +The list of available RPC endpoints can be found by making a request to the RPC port. +For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to +`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints. + +Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc). diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index 70b2d0892..3f38551cf 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -461,7 +461,8 @@ When Tendermint Core sees that valid transactions (validated through `CheckTx`) included in blocks, it groups some of these transactions and then gives the application a chance to modify the group by invoking `PrepareProposal`. -The application is free to modify the group before returning from the call. +The application is free to modify the group before returning from the call, as long as the resulting set +does not use more bytes than `RequestPrepareProposal.max_tx_bytes' For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. 
In the following code, the application simply returns the unmodified group of transactions: diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 4a1e3bfba..5bd65aa16 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -460,7 +460,8 @@ When Tendermint Core sees that valid transactions (validated through `CheckTx`) included in blocks, it groups some of these transactions and then gives the application a chance to modify the group by invoking `PrepareProposal`. -The application is free to modify the group before returning from the call. +The application is free to modify the group before returning from the call, as long as the resulting set +does not use more bytes than `RequestPrepareProposal.max_tx_bytes' For example, the application may reorder, add, or even remove transactions from the group to improve the execution of the block once accepted. In the following code, the application simply returns the unmodified group of transactions: diff --git a/go.mod b/go.mod index 32e7fcca5..74b8b288a 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/pointlander/peg v1.0.1 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.37.0 + github.com/prometheus/common v0.38.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rs/cors v1.8.2 github.com/sasha-s/go-deadlock v0.3.1 @@ -32,13 +32,13 @@ require ( github.com/spf13/viper v1.14.0 github.com/stretchr/testify v1.8.1 github.com/tendermint/tm-db v0.6.6 - golang.org/x/crypto v0.3.0 - golang.org/x/net v0.2.0 + golang.org/x/crypto v0.4.0 + golang.org/x/net v0.4.0 google.golang.org/grpc v1.51.0 ) require ( - github.com/bufbuild/buf v1.9.0 + github.com/bufbuild/buf v1.10.0 github.com/creachadair/taskgroup v0.3.2 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 ) @@ -51,6 +51,7 @@ require ( github.com/google/uuid v1.3.0 github.com/oasisprotocol/curve25519-voi 
v0.0.0-20220708102147-0a8a51822cae github.com/vektra/mockery/v2 v2.15.0 + golang.org/x/sync v0.1.0 gonum.org/v1/gonum v0.12.0 google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 ) @@ -78,14 +79,14 @@ require ( github.com/bombsimon/wsl/v3 v3.3.0 // indirect github.com/breml/bidichk v0.2.3 // indirect github.com/breml/errchkjson v0.3.0 // indirect - github.com/bufbuild/connect-go v1.0.0 // indirect + github.com/bufbuild/connect-go v1.1.0 // indirect github.com/bufbuild/protocompile v0.1.0 // indirect github.com/butuzov/ireturn v0.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect - github.com/containerd/containerd v1.6.8 // indirect + github.com/containerd/containerd v1.6.9 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/containerd/typeurl v1.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -98,7 +99,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.19+incompatible // indirect + github.com/docker/docker v20.10.21+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -106,6 +107,7 @@ require ( github.com/ettle/strcase v0.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect + github.com/felixge/fgprof v0.9.3 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect @@ -137,6 +139,7 @@ require ( github.com/golangci/unconvert 
v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/btree v1.0.0 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect @@ -158,7 +161,7 @@ require ( github.com/kisielk/errcheck v1.6.2 // indirect github.com/kisielk/gotool v1.0.0 // indirect github.com/kkHAIKE/contextcheck v1.1.3 // indirect - github.com/klauspost/compress v1.15.11 // indirect + github.com/klauspost/compress v1.15.12 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.6 // indirect @@ -174,12 +177,12 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect github.com/mgechev/revive v1.2.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/buildkit v0.10.4 // indirect + github.com/moby/buildkit v0.10.5 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/moricho/tparallel v0.2.1 // indirect github.com/morikuni/aec v1.0.0 // indirect @@ -196,7 +199,7 @@ require ( github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/profile v1.6.0 // indirect + github.com/pkg/profile v1.7.0 // indirect 
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect @@ -244,23 +247,22 @@ require ( github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect - go.opentelemetry.io/otel v1.11.0 // indirect - go.opentelemetry.io/otel/metric v0.32.3 // indirect - go.opentelemetry.io/otel/trace v1.11.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect + go.opentelemetry.io/otel v1.11.1 // indirect + go.opentelemetry.io/otel/metric v0.33.0 // indirect + go.opentelemetry.io/otel/trace v1.11.1 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.6.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/term v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect - golang.org/x/tools v0.2.0 // indirect - google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/term v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect + golang.org/x/tools v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 
142fa8d97..da017675c 100644 --- a/go.sum +++ b/go.sum @@ -23,7 +23,7 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -172,10 +172,10 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8= -github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE= -github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo= -github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= +github.com/bufbuild/buf v1.10.0 h1:t6rV4iP1cs/sJH5SYvcLanOshLvmtvwSC+Mt+GfG05s= +github.com/bufbuild/buf v1.10.0/go.mod h1:79BrOWh8uX1a0SVSoPyeYgtP0+Y0n5J3Tt6kjTSkLoU= +github.com/bufbuild/connect-go v1.1.0 h1:AUgqqO2ePdOJSpPOep6BPYz5v2moW1Lb8sQh0EeRzQ8= +github.com/bufbuild/connect-go v1.1.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= github.com/bufbuild/protocompile v0.1.0 
h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= @@ -220,8 +220,8 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= -github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= +github.com/containerd/containerd v1.6.9 h1:IN/r8DUes/B5lEGTNfIiUkfZBtIQJGx2ai703dV6lRA= +github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= @@ -284,8 +284,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64= -github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.21+incompatible 
h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= +github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -325,6 +325,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -506,6 +508,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io= +github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod 
h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -606,6 +611,7 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= @@ -620,7 +626,7 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= +github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af 
h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -660,8 +666,8 @@ github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkS github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -736,8 +742,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= @@ -764,8 +770,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4= -github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug= +github.com/moby/buildkit v0.10.5 h1:d9krS/lG3dn6N7y+R8o9PTgIixlYAaDk35f3/B4jZOw= +github.com/moby/buildkit v0.10.5/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= @@ -799,7 +805,6 @@ github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.8.3 
h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= @@ -887,8 +892,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= -github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -934,8 +939,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E= +github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1169,16 +1174,17 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= -go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= -go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= -go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c= -go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc= -go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= -go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 h1:PRXhsszxTt5bbPriTjmaweWUsAnJYeWBhUMLRetUgBU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4/go.mod h1:05eWWy6ZWzmpeImD3UowLTB3VjDMU1yxQ+ENuVWDM3c= +go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4= 
+go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= +go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E= +go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI= +go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ= +go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1225,8 +1231,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1240,8 +1246,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= 
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= @@ -1274,8 +1280,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1331,11 +1337,9 @@ golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1349,7 +1353,6 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 
v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1452,12 +1455,12 @@ golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1465,13 +1468,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1481,8 +1484,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1583,8 +1586,8 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1674,8 +1677,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e 
h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1727,8 +1730,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/inspect/doc.go b/inspect/doc.go new file mode 100644 index 000000000..0d17771b5 --- /dev/null +++ b/inspect/doc.go @@ -0,0 +1,36 @@ +/* +Package inspect provides a tool for investigating the state of a +failed Tendermint node. + +This package provides the Inspector type. 
The Inspector type runs a subset of the Tendermint +RPC endpoints that are useful for debugging issues with Tendermint consensus. + +When a node running the Tendermint consensus engine detects an inconsistent consensus state, +the entire node will crash. The Tendermint consensus engine cannot run in this +inconsistent state so the node will not be able to start up again. + +The RPC endpoints provided by the Inspector type allow for a node operator to inspect +the block store and state store to better understand what may have caused the inconsistent state. + +The Inspector type's lifecycle is controlled by a context.Context + + ins := inspect.NewFromConfig(rpcConfig) + ctx, cancelFunc := context.WithCancel(context.Background()) + + // Run blocks until the Inspector server is shut down. + go ins.Run(ctx) + ... + + // calling the cancel function will stop the running inspect server + cancelFunc() + +Inspector serves its RPC endpoints on the address configured in the RPC configuration + + rpcConfig.ListenAddress = "tcp://127.0.0.1:26657" + ins := inspect.NewFromConfig(rpcConfig) + go ins.Run(ctx) + +The list of available RPC endpoints can then be viewed by navigating to +http://127.0.0.1:26657/ in the web browser. 
+*/ +package inspect diff --git a/inspect/inspect.go b/inspect/inspect.go new file mode 100644 index 000000000..71c44f7bb --- /dev/null +++ b/inspect/inspect.go @@ -0,0 +1,138 @@ +package inspect + +import ( + "context" + "errors" + "net" + "os" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/inspect/rpc" + "github.com/tendermint/tendermint/libs/log" + tmstrings "github.com/tendermint/tendermint/libs/strings" + rpccore "github.com/tendermint/tendermint/rpc/core" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/state/indexer/block" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" + + "golang.org/x/sync/errgroup" +) + +var ( + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +) + +// Inspector manages an RPC service that exports methods to debug a failed node. +// After a node shuts down due to a consensus failure, it will no longer start +// up and its state cannot easily be inspected. An Inspector value provides a similar interface +// to the node, using the underlying Tendermint data stores, without bringing up +// any other components. A caller can query the Inspector service to inspect the +// persisted state and debug the failure. +type Inspector struct { + routes rpccore.RoutesMap + + config *config.RPCConfig + + logger log.Logger + + // References to the state store and block store are maintained to enable + // the Inspector to safely close them on shutdown. + ss state.Store + bs state.BlockStore +} + +// New returns an Inspector that serves RPC on the specified BlockStore and StateStore. +// The Inspector type does not modify the state or block stores. +// The sinks are used to enable block and transaction querying via the RPC server. +// The caller is responsible for starting and stopping the Inspector service. 
+// +//nolint:lll +func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector { + routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger) + eb := types.NewEventBus() + eb.SetLogger(logger.With("module", "events")) + return &Inspector{ + routes: routes, + config: cfg, + logger: logger, + ss: ss, + bs: bs, + } +} + +// NewFromConfig constructs an Inspector using the values defined in the passed in config. +func NewFromConfig(cfg *config.Config) (*Inspector, error) { + bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg}) + if err != nil { + return nil, err + } + bs := store.NewBlockStore(bsDB) + sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg}) + if err != nil { + return nil, err + } + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + return nil, err + } + txidx, blkidx, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID) + if err != nil { + return nil, err + } + lg := logger.With("module", "inspect") + ss := state.NewStore(sDB, state.StoreOptions{}) + return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil +} + +// Run starts the Inspector servers and blocks until the servers shut down. The passed +// in context is used to control the lifecycle of the servers. 
+func (ins *Inspector) Run(ctx context.Context) error { + defer ins.bs.Close() + defer ins.ss.Close() + + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) +} + +func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error { + g, tctx := errgroup.WithContext(ctx) + listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ") + rh := rpc.Handler(cfg, routes, logger) + for _, listenerAddr := range listenAddrs { + server := rpc.Server{ + Logger: logger, + Config: cfg, + Handler: rh, + Addr: listenerAddr, + } + if cfg.IsTLSEnabled() { + keyFile := cfg.KeyFile() + certFile := cfg.CertFile() + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTPS server starting", "address", listenerAddr, + "certfile", certFile, "keyfile", keyFile) + err := server.ListenAndServeTLS(tctx, certFile, keyFile) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTPS server stopped", "address", listenerAddr) + return nil + }) + } else { + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTP server starting", "address", listenerAddr) + err := server.ListenAndServe(tctx) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTP server stopped", "address", listenerAddr) + return nil + }) + } + } + return g.Wait() +} diff --git a/inspect/inspect_test.go b/inspect/inspect_test.go new file mode 100644 index 000000000..c4ad7a5e4 --- /dev/null +++ b/inspect/inspect_test.go @@ -0,0 +1,605 @@ +package inspect_test + +import ( + "context" + "fmt" + "net" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + abcitypes "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/internal/test" + 
"github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/proto/tendermint/state" + httpclient "github.com/tendermint/tendermint/rpc/client/http" + indexermocks "github.com/tendermint/tendermint/state/indexer/mocks" + statemocks "github.com/tendermint/tendermint/state/mocks" + txindexmocks "github.com/tendermint/tendermint/state/txindex/mocks" + "github.com/tendermint/tendermint/types" +) + +func TestInspectConstructor(t *testing.T) { + cfg := test.ResetTestRoot("test") + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + d, err := inspect.NewFromConfig(cfg) + require.NoError(t, err) + require.NotNil(t, d) + }) + +} + +func TestInspectRun(t *testing.T) { + cfg := test.ResetTestRoot("test") + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + d, err := inspect.NewFromConfig(cfg) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + stoppedWG := &sync.WaitGroup{} + stoppedWG.Add(1) + go func() { + require.NoError(t, d.Run(ctx)) + stoppedWG.Done() + }() + cancel() + stoppedWG.Wait() + }) + +} + +func TestBlock(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = []byte("test hash") + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}) + blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) + blockStoreMock.On("Close").Return(nil) + + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + + rpcConfig := config.TestRPCConfig() + l := 
log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + resultBlock, err := cli.Block(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, testBlock.Height, resultBlock.Block.Height) + require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestTxSearch(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash)) + testTxResult := &abcitypes.TxResult{ + Height: 1, + Index: 100, + Tx: testTx, + } + + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + txIndexerMock.On("Search", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { + return testQuery == strings.ReplaceAll(q.String(), " ", "") + })). 
+ Return([]*abcitypes.TxResult{testTxResult}, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + + var page = 1 + resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + require.NoError(t, err) + require.Len(t, resultTxSearch.Txs, 1) + require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) + + cancel() + wg.Wait() + + txIndexerMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestTx(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blkIdxMock := &indexermocks.BlockIndexer{} + txIndexerMock := &txindexmocks.TxIndexer{} + txIndexerMock.On("Get", testHash).Return(&abcitypes.TxResult{ + Tx: testTx, + }, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + + res, err := cli.Tx(context.Background(), testHash, false) + require.NoError(t, err) + require.Equal(t, types.Tx(testTx), res.Tx) + + cancel() + wg.Wait() + + txIndexerMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestConsensusParams(t *testing.T) { + testHeight := int64(1) + testMaxGas := int64(55) + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{ + Block: types.BlockParams{ + MaxGas: testMaxGas, + }, + }, nil) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + params, err := cli.ConsensusParams(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockResults(t *testing.T) { + testHeight := int64(1) + testGasUsed := int64(100) + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ + DeliverTxs: []*abcitypes.ResponseDeliverTx{ + { + GasUsed: testGasUsed, + }, + }, + EndBlock: &abcitypes.ResponseEndBlock{}, + BeginBlock: &abcitypes.ResponseBeginBlock{}, + }, nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + res, err := cli.BlockResults(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, res.TxsResults[0].GasUsed, testGasUsed) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestCommit(t *testing.T) { + testHeight := int64(1) + testRound := int32(101) + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil) + blockStoreMock.On("LoadSeenCommit", testHeight).Return(&types.Commit{ + Height: testHeight, + Round: testRound, + }, nil) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + res, err := cli.Commit(context.Background(), &testHeight) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, res.SignedHeader.Commit.Round, testRound) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockByHash(t *testing.T) { + testHeight := int64(1) + testHash := []byte("test hash") + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testHash + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testHash, + }, + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + res, err := cli.BlockByHash(context.Background(), testHash) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, []byte(res.BlockID.Hash), testHash) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockchain(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlockHash := []byte("test hash") + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testBlockHash + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + res, err := cli.BlockchainInfo(context.Background(), 0, 100) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestValidators(t *testing.T) { + testHeight := int64(1) + testVotingPower := int64(100) + testValidators := types.ValidatorSet{ + Validators: []*types.Validator{ + { + VotingPower: testVotingPower, + }, + }, + } + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testVotingPower, res.Validators[0].VotingPower) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockSearch(t *testing.T) { + testHeight := int64(1) + testBlockHash := []byte("test hash") + testQuery := "block.height = 1" + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("Close").Return(nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Close").Return(nil) + + txIndexerMock := &txindexmocks.TxIndexer{} + blkIdxMock := &indexermocks.BlockIndexer{} + blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{ + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + blkIdxMock.On("Search", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). + Return([]int64{testHeight}, nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + testOrderBy := "desc" + res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func requireConnect(t testing.TB, addr string, retries int) { + parts := strings.SplitN(addr, "://", 2) + if len(parts) != 2 { + t.Fatalf("malformed address to dial: %s", addr) + } + var err error + for i := 0; i < retries; i++ { + var conn net.Conn + conn, err = net.Dial(parts[0], parts[1]) + if err == nil { + conn.Close() + return + } + // FIXME attempt to yield and let the other goroutine continue execution. + time.Sleep(time.Microsecond * 100) + } + t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err) +} diff --git a/inspect/rpc/rpc.go b/inspect/rpc/rpc.go new file mode 100644 index 000000000..0aa287511 --- /dev/null +++ b/inspect/rpc/rpc.go @@ -0,0 +1,128 @@ +package rpc + +import ( + "context" + "net/http" + "time" + + "github.com/rs/cors" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/rpc/core" + "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/state/txindex" +) + +// Server defines parameters for running an Inspector rpc server. 
+type Server struct { + Addr string // TCP address to listen on, ":http" if empty + Handler http.Handler + Logger log.Logger + Config *config.RPCConfig +} + +// Routes returns the set of routes used by the Inspector server. +func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, logger log.Logger) core.RoutesMap { //nolint: lll + env := &core.Environment{ + Config: cfg, + BlockIndexer: blkidx, + TxIndexer: txidx, + StateStore: s, + BlockStore: bs, + ConsensusReactor: waitSyncCheckerImpl{}, + Logger: logger, + } + return core.RoutesMap{ + "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight"), + "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height"), + "block": server.NewRPCFunc(env.Block, "height"), + "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash"), + "block_results": server.NewRPCFunc(env.BlockResults, "height"), + "commit": server.NewRPCFunc(env.Commit, "height"), + "header": server.NewRPCFunc(env.Header, "height"), + "header_by_hash": server.NewRPCFunc(env.HeaderByHash, "hash"), + "validators": server.NewRPCFunc(env.Validators, "height,page,per_page"), + "tx": server.NewRPCFunc(env.Tx, "hash,prove"), + "tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"), + "block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"), + } +} + +// Handler returns the http.Handler configured for use with an Inspector server. Handler +// registers the routes on the http.Handler and also registers the websocket handler +// and the CORS handler if specified by the configuration options. 
+func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler { + mux := http.NewServeMux() + wmLogger := logger.With("protocol", "websocket") + wm := server.NewWebsocketManager(routes, + server.ReadLimit(rpcConfig.MaxBodyBytes)) + wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + + server.RegisterRPCFuncs(mux, routes, logger) + var rootHandler http.Handler = mux + if rpcConfig.IsCorsEnabled() { + rootHandler = addCORSHandler(rpcConfig, mux) + } + return rootHandler +} + +func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: rpcConfig.CORSAllowedOrigins, + AllowedMethods: rpcConfig.CORSAllowedMethods, + AllowedHeaders: rpcConfig.CORSAllowedHeaders, + }) + h = corsMiddleware.Handler(h) + return h +} + +type waitSyncCheckerImpl struct{} + +func (waitSyncCheckerImpl) WaitSync() bool { + return false +} + +// ListenAndServe listens on the address specified in srv.Addr and handles any +// incoming requests over HTTP using the Inspector rpc handler specified on the server. +func (srv *Server) ListenAndServe(ctx context.Context) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) +} + +// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles +// incoming requests over HTTPS using the Inspector rpc handler specified on the server. 
+func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) +} + +func serverRPCConfig(r *config.RPCConfig) *server.Config { + cfg := server.DefaultConfig() + cfg.MaxBodyBytes = r.MaxBodyBytes + cfg.MaxHeaderBytes = r.MaxHeaderBytes + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second + } + return cfg +} diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 321e775c8..3a1d66b37 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -27,7 +27,7 @@ // select { // case msg <- subscription.Out(): // // handle msg.Data() and msg.Events() -// case <-subscription.Cancelled(): +// case <-subscription.Canceled(): // return subscription.Err() // } // } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 8edf12508..324928b29 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -431,7 +431,7 @@ func benchmarkNClients(n int, b *testing.B) { select { case <-subscription.Out(): continue - case <-subscription.Cancelled(): + case <-subscription.Canceled(): return } } @@ -472,7 +472,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { select { case <-subscription.Out(): continue - case <-subscription.Cancelled(): + case <-subscription.Canceled(): return } } @@ -501,7 +501,7 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, } func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) { - _, ok := <-subscription.Cancelled() + _, ok 
:= <-subscription.Canceled() assert.False(t, ok) assert.Equal(t, err, subscription.Err()) } diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 141569310..d07abd86d 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -43,11 +43,9 @@ func (s *Subscription) Out() <-chan Message { return s.out } -// Cancelled returns a channel that's closed when the subscription is +// Canceled returns a channel that's closed when the subscription is // terminated and supposed to be used in a select statement. -// -//nolint:misspell -func (s *Subscription) Cancelled() <-chan struct{} { +func (s *Subscription) Canceled() <-chan struct{} { return s.canceled } diff --git a/libs/strings/string.go b/libs/strings/string.go index 466759233..37026dcc2 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -32,6 +32,27 @@ func SplitAndTrim(s, sep, cutset string) []string { return spl } +// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +func SplitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} + // Returns true if s is a non-empty printable non-tab ascii character. 
func IsASCIIText(s string) bool { if len(s) == 0 { diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 359f6a8fe..70dca1c10 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { } // 4) Start listening for new connections. - listener, err := rpcserver.Listen(p.Addr, p.Config) + listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections) if err != nil { return nil, mux, err } diff --git a/node/node.go b/node/node.go index ddf86e0dc..0829db7af 100644 --- a/node/node.go +++ b/node/node.go @@ -141,7 +141,7 @@ func NewNode(config *cfg.Config, nodeKey *p2p.NodeKey, clientCreator proxy.ClientCreator, genesisDocProvider GenesisDocProvider, - dbProvider DBProvider, + dbProvider cfg.DBProvider, metricsProvider MetricsProvider, logger log.Logger, options ...Option, @@ -503,12 +503,12 @@ func (n *Node) OnStop() { } // ConfigureRPC makes sure RPC has all the objects it needs to operate. -func (n *Node) ConfigureRPC() error { +func (n *Node) ConfigureRPC() (*rpccore.Environment, error) { pubKey, err := n.privValidator.GetPubKey() - if err != nil { - return fmt.Errorf("can't get pubkey: %w", err) + if pubKey == nil || err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) } - rpccore.SetEnvironment(&rpccore.Environment{ + rpcCoreEnv := rpccore.Environment{ ProxyAppQuery: n.proxyApp.Query(), ProxyAppMempool: n.proxyApp.Mempool(), @@ -518,8 +518,8 @@ func (n *Node) ConfigureRPC() error { ConsensusState: n.consensusState, P2PPeers: n.sw, P2PTransport: n, + PubKey: pubKey, - PubKey: pubKey, GenDoc: n.genesisDoc, TxIndexer: n.txIndexer, BlockIndexer: n.blockIndexer, @@ -530,24 +530,24 @@ func (n *Node) ConfigureRPC() error { Logger: n.Logger.With("module", "rpc"), Config: *n.config.RPC, - }) - if err := rpccore.InitGenesisChunks(); err != nil { - return err } - - return nil + if err := rpcCoreEnv.InitGenesisChunks(); err != nil { + return nil, err + } + return 
&rpcCoreEnv, nil } func (n *Node) startRPC() ([]net.Listener, error) { - err := n.ConfigureRPC() + env, err := n.ConfigureRPC() if err != nil { return nil, err } listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") + routes := env.GetRoutes() if n.config.RPC.Unsafe { - rpccore.AddUnsafeRoutes() + env.AddUnsafeRoutes(routes) } config := rpcserver.DefaultConfig() @@ -567,7 +567,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { mux := http.NewServeMux() rpcLogger := n.Logger.With("module", "rpc-server") wmLogger := rpcLogger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(rpccore.Routes, + wm := rpcserver.NewWebsocketManager(routes, rpcserver.OnDisconnect(func(remoteAddr string) { err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) if err != nil && err != tmpubsub.ErrSubscriptionNotFound { @@ -579,10 +579,10 @@ func (n *Node) startRPC() ([]net.Listener, error) { ) wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) + rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, - config, + config.MaxOpenConnections, ) if err != nil { return nil, err @@ -640,12 +640,12 @@ func (n *Node) startRPC() ([]net.Listener, error) { if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } - listener, err := rpcserver.Listen(grpcListenAddr, config) + listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections) if err != nil { return nil, err } go func() { - if err := grpccore.StartGRPCServer(listener); err != nil { + if err := grpccore.StartGRPCServer(env, listener); err != nil { n.Logger.Error("Error starting gRPC server", "err", err) } }() diff --git a/node/node_test.go b/node/node_test.go index 4fdf63f3f..01a8e01ab 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -53,7 +53,7 @@ func 
TestNodeStartStop(t *testing.T) { require.NoError(t, err) select { case <-blocksSub.Out(): - case <-blocksSub.Cancelled(): + case <-blocksSub.Canceled(): t.Fatal("blocksSub was canceled") case <-time.After(10 * time.Second): t.Fatal("timed out waiting for the node to produce a block") @@ -461,7 +461,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), - DefaultDBProvider, + cfg.DefaultDBProvider, DefaultMetricsProvider(config.Instrumentation), log.TestingLogger(), CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKSYNC": customBlocksyncReactor}), diff --git a/node/setup.go b/node/setup.go index 5bada8b6f..a70894469 100644 --- a/node/setup.go +++ b/node/setup.go @@ -7,6 +7,8 @@ import ( "fmt" "net" "strings" + + _ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port "time" dbm "github.com/tendermint/tm-db" @@ -17,6 +19,7 @@ import ( cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" + "github.com/tendermint/tendermint/statesync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" @@ -30,13 +33,8 @@ import ( "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" - blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" - blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" - "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/state/indexer/block" "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/state/txindex/kv" - "github.com/tendermint/tendermint/state/txindex/null" - "github.com/tendermint/tendermint/statesync" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" 
"github.com/tendermint/tendermint/version" @@ -44,24 +42,8 @@ import ( _ "github.com/lib/pq" // provide the psql db driver ) -// DBContext specifies config information for loading a new DB. -type DBContext struct { - ID string - Config *cfg.Config -} - -// DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (dbm.DB, error) - const readHeaderTimeout = 10 * time.Second -// DefaultDBProvider returns a database using the DBBackend and DBDir -// specified in the ctx.Config. -func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { - dbType := dbm.BackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) -} - // GenesisDocProvider returns a GenesisDoc. // It allows the GenesisDoc to be pulled from sources other than the // filesystem, for instance from a distributed key-value store cluster. @@ -92,7 +74,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), - DefaultDBProvider, + cfg.DefaultDBProvider, DefaultMetricsProvider(config.Instrumentation), logger, ) @@ -124,15 +106,15 @@ type blockSyncReactor interface { //------------------------------------------------------------------------------ -func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { +func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) + blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config}) if err != nil { return } blockStore = store.NewBlockStore(blockStoreDB) - stateDB, err = dbProvider(&DBContext{"state", config}) + stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config}) if err != nil { return } @@ -161,7 +143,7 @@ func 
createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { func createAndStartIndexerService( config *cfg.Config, chainID string, - dbProvider DBProvider, + dbProvider cfg.DBProvider, eventBus *types.EventBus, logger log.Logger, ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { @@ -169,31 +151,9 @@ func createAndStartIndexerService( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer ) - - switch config.TxIndex.Indexer { - case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) - if err != nil { - return nil, nil, nil, err - } - - txIndexer = kv.NewTxIndex(store) - blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) - - case "psql": - if config.TxIndex.PsqlConn == "" { - return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`) - } - es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID) - if err != nil { - return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err) - } - txIndexer = es.TxIndexer() - blockIndexer = es.BlockIndexer() - - default: - txIndexer = &null.TxIndex{} - blockIndexer = &blockidxnull.BlockerIndexer{} + txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID) + if err != nil { + return nil, nil, nil, err } indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) @@ -266,6 +226,7 @@ func createMempoolAndMempoolReactor( memplMetrics *mempl.Metrics, logger log.Logger, ) (mempl.Mempool, p2p.Reactor) { + logger = logger.With("module", "mempool") switch config.Mempool.Version { case cfg.MempoolV1: mp := mempoolv1.NewTxMempool( @@ -285,6 +246,7 @@ func createMempoolAndMempoolReactor( if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() } + reactor.SetLogger(logger) return mp, reactor @@ -307,6 +269,7 @@ func createMempoolAndMempoolReactor( if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() } + reactor.SetLogger(logger) return mp, reactor @@ -315,10 +278,10 @@ 
func createMempoolAndMempoolReactor( } } -func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, +func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider, stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger, ) (*evidence.Reactor, *evidence.Pool, error) { - evidenceDB, err := dbProvider(&DBContext{"evidence", config}) + evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config}) if err != nil { return nil, nil, err } diff --git a/proxy/client.go b/proxy/client.go index 8fc9917ae..e4455bead 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -26,8 +26,12 @@ type localClientCreator struct { app types.Application } -// NewLocalClientCreator returns a ClientCreator for the given app, -// which will be running locally. +// NewLocalClientCreator returns a [ClientCreator] for the given app, which +// will be running locally. +// +// Maintains a single mutex over all new clients created with NewABCIClient. +// For a local client creator that uses a single mutex per new client, rather +// use [NewUnsyncLocalClientCreator]. func NewLocalClientCreator(app types.Application) ClientCreator { return &localClientCreator{ mtx: new(tmsync.Mutex), @@ -39,24 +43,26 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { return abcicli.NewLocalClient(l.mtx, l.app), nil } -//--------------------------------------------------------------- -// unsynchronized local proxy on an in-proc app (no mutex) +//---------------------------------------------------- +// local proxy creates a new mutex for each client type unsyncLocalClientCreator struct { app types.Application } -// NewUnsyncLocalClientCreator returns a ClientCreator for the given app, which -// will be running locally. Unlike NewLocalClientCreator, this leaves -// synchronization up to the application. +// NewUnsyncLocalClientCreator returns a [ClientCreator] for the given app. 
+// Unlike [NewLocalClientCreator], each call to NewABCIClient returns an ABCI +// client that maintains its own mutex over the application. func NewUnsyncLocalClientCreator(app types.Application) ClientCreator { return &unsyncLocalClientCreator{ app: app, } } -func (l *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) { - return abcicli.NewUnsyncLocalClient(l.app), nil +func (c *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) { + // Specifying nil for the mutex causes each instance to create its own + // mutex. + return abcicli.NewLocalClient(nil, c.app), nil } //--------------------------------------------------------------- @@ -88,23 +94,33 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { return remoteApp, nil } -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore' or 'noop', otherwise - a remote client. +// DefaultClientCreator returns a default [ClientCreator], which will create a +// local client if addr is one of "kvstore", "persistent_kvstore", "e2e", +// "noop". +// +// Otherwise a remote client will be created. +// +// Each of "kvstore", "persistent_kvstore" and "e2e" also currently have an +// "_unsync" variant (i.e. "kvstore_unsync", etc.), which attempts to replicate +// the same concurrency model as the remote client. 
func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { switch addr { case "kvstore": return NewLocalClientCreator(kvstore.NewApplication()) + case "kvstore_unsync": + return NewUnsyncLocalClientCreator(kvstore.NewApplication()) case "persistent_kvstore": return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir)) + case "persistent_kvstore_unsync": + return NewUnsyncLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir)) case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { panic(err) } return NewLocalClientCreator(app) - case "e2e_sync": - app, err := e2e.NewSyncApplication(e2e.DefaultConfig(dbDir)) + case "e2e_unsync": + app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { panic(err) } diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index ca324dee0..31f78f6b5 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -41,22 +41,20 @@ type Local struct { *types.EventBus Logger log.Logger ctx *rpctypes.Context + env *core.Environment } // NewLocal configures a client that calls the Node directly. -// -// Note that given how rpc/core works with package singletons, that -// you can only have one node per process. So make sure test cases -// don't run in parallel, or try to simulate an entire network in -// one process... 
func New(node *nm.Node) *Local { - if err := node.ConfigureRPC(); err != nil { + env, err := node.ConfigureRPC() + if err != nil { node.Logger.Error("Error configuring RPC", "err", err) } return &Local{ EventBus: node.EventBus(), Logger: log.NewNopLogger(), ctx: &rpctypes.Context{}, + env: env, } } @@ -68,11 +66,11 @@ func (c *Local) SetLogger(l log.Logger) { } func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - return core.Status(c.ctx) + return c.env.Status(c.ctx) } func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo(c.ctx) + return c.env.ABCIInfo(c.ctx) } func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -84,55 +82,55 @@ func (c *Local) ABCIQueryWithOptions( path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) + return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(c.ctx, tx) + return c.env.BroadcastTxCommit(c.ctx, tx) } func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(c.ctx, tx) + return c.env.BroadcastTxAsync(c.ctx, tx) } func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(c.ctx, tx) + return c.env.BroadcastTxSync(c.ctx, tx) } func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { - return core.UnconfirmedTxs(c.ctx, limit) + return c.env.UnconfirmedTxs(c.ctx, limit) } func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return core.NumUnconfirmedTxs(c.ctx) + return 
c.env.NumUnconfirmedTxs(c.ctx) } func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - return core.CheckTx(c.ctx, tx) + return c.env.CheckTx(c.ctx, tx) } func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { - return core.NetInfo(c.ctx) + return c.env.NetInfo(c.ctx) } func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState(c.ctx) + return c.env.DumpConsensusState(c.ctx) } func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { - return core.ConsensusState(c.ctx) + return c.env.GetConsensusState(c.ctx) } func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - return core.ConsensusParams(c.ctx, height) + return c.env.ConsensusParams(c.ctx, height) } func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { - return core.Health(c.ctx) + return c.env.Health(c.ctx) } func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(c.ctx, seeds) + return c.env.UnsafeDialSeeds(c.ctx, seeds) } func (c *Local) DialPeers( @@ -142,51 +140,51 @@ func (c *Local) DialPeers( unconditional, private bool, ) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) + return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) } func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(c.ctx, minHeight, maxHeight) + return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { - return core.Genesis(c.ctx) + return c.env.Genesis(c.ctx) } func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { - 
return core.GenesisChunked(c.ctx, id) + return c.env.GenesisChunked(c.ctx, id) } func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { - return core.Block(c.ctx, height) + return c.env.Block(c.ctx, height) } func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { - return core.BlockByHash(c.ctx, hash) + return c.env.BlockByHash(c.ctx, hash) } func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { - return core.BlockResults(c.ctx, height) + return c.env.BlockResults(c.ctx, height) } func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { - return core.Header(c.ctx, height) + return c.env.Header(c.ctx, height) } func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { - return core.HeaderByHash(c.ctx, hash) + return c.env.HeaderByHash(c.ctx, hash) } func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(c.ctx, height) + return c.env.Commit(c.ctx, height) } func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { - return core.Validators(c.ctx, height, page, perPage) + return c.env.Validators(c.ctx, height, page, perPage) } func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { - return core.Tx(c.ctx, hash, prove) + return c.env.Tx(c.ctx, hash, prove) } func (c *Local) TxSearch( @@ -197,7 +195,7 @@ func (c *Local) TxSearch( perPage *int, orderBy string, ) (*ctypes.ResultTxSearch, error) { - return core.TxSearch(c.ctx, query, prove, page, perPage, orderBy) + return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy) } func (c *Local) BlockSearch( @@ -206,11 +204,11 @@ func (c *Local) BlockSearch( page, perPage *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { - return core.BlockSearch(c.ctx, query, 
page, perPage, orderBy) + return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) } func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - return core.BroadcastEvidence(c.ctx, ev) + return c.env.BroadcastEvidence(c.ctx, ev) } func (c *Local) Subscribe( @@ -262,7 +260,7 @@ func (c *Local) eventsRoutine( c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) } } - case <-sub.Cancelled(): + case <-sub.Canceled(): if sub.Err() == tmpubsub.ErrUnsubscribed { return } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index ec3a358cd..4be251ce5 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -28,10 +28,6 @@ import ( ) // Client wraps arbitrary implementations of the various interfaces. -// -// We provide a few choices to mock out each one in this package. -// Nothing hidden here, so no New function, just construct it from -// some parts, and swap them out them during the tests. 
type Client struct { client.ABCIClient client.SignClient @@ -41,6 +37,14 @@ type Client struct { client.EvidenceClient client.MempoolClient service.Service + + env *core.Environment +} + +func New() Client { + return Client{ + env: &core.Environment{}, + } } var _ client.Client = Client{} @@ -80,11 +84,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { } func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - return core.Status(&rpctypes.Context{}) + return c.env.Status(&rpctypes.Context{}) } func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo(&rpctypes.Context{}) + return c.env.ABCIInfo(&rpctypes.Context{}) } func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -96,47 +100,47 @@ func (c Client) ABCIQueryWithOptions( path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) + return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(&rpctypes.Context{}, tx) + return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - return core.CheckTx(&rpctypes.Context{}, tx) + return c.env.CheckTx(&rpctypes.Context{}, 
tx) } func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { - return core.NetInfo(&rpctypes.Context{}) + return c.env.NetInfo(&rpctypes.Context{}) } func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { - return core.ConsensusState(&rpctypes.Context{}) + return c.env.GetConsensusState(&rpctypes.Context{}) } func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState(&rpctypes.Context{}) + return c.env.DumpConsensusState(&rpctypes.Context{}) } func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - return core.ConsensusParams(&rpctypes.Context{}, height) + return c.env.ConsensusParams(&rpctypes.Context{}, height) } func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { - return core.Health(&rpctypes.Context{}) + return c.env.Health(&rpctypes.Context{}) } func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) + return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } func (c Client) DialPeers( @@ -146,33 +150,33 @@ func (c Client) DialPeers( unconditional, private bool, ) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) + return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) } func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) + return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { - return core.Genesis(&rpctypes.Context{}) + return c.env.Genesis(&rpctypes.Context{}) } func (c Client) Block(ctx 
context.Context, height *int64) (*ctypes.ResultBlock, error) { - return core.Block(&rpctypes.Context{}, height) + return c.env.Block(&rpctypes.Context{}, height) } func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { - return core.BlockByHash(&rpctypes.Context{}, hash) + return c.env.BlockByHash(&rpctypes.Context{}, hash) } func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(&rpctypes.Context{}, height) + return c.env.Commit(&rpctypes.Context{}, height) } func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { - return core.Validators(&rpctypes.Context{}, height, page, perPage) + return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - return core.BroadcastEvidence(&rpctypes.Context{}, ev) + return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index d1f7193be..4df8cba3f 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -9,8 +9,8 @@ import ( ) // ABCIQuery queries the application for some information. -// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_query -func ABCIQuery( +// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query +func (env *Environment) ABCIQuery( ctx *rpctypes.Context, path string, data bytes.HexBytes, @@ -31,8 +31,8 @@ func ABCIQuery( } // ABCIInfo gets some info about the application. 
-// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_info -func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info +func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { return nil, err diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 5d277218f..360e59e12 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -15,10 +15,19 @@ import ( ) // BlockchainInfo gets block headers for minHeight <= height <= maxHeight. -// Block headers are returned in descending order (highest first). -// More: https://docs.tendermint.com/main/rpc/#/Info/blockchain -func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - // maximum 20 block metas +// +// If maxHeight does not yet exist, blocks up to the current height will be +// returned. If minHeight does not exist (due to pruning), earliest existing +// height will be used. +// +// At most 20 items will be returned. Block headers are returned in descending +// order (highest first). +// +// More: https://docs.tendermint.com/master/rpc/#/Info/blockchain +func (env *Environment) BlockchainInfo( + ctx *rpctypes.Context, + minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + const limit int64 = 20 var err error minHeight, maxHeight, err = filterMinMax( @@ -79,8 +88,8 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Header gets block header at a given height. // If no height is provided, it will fetch the latest header. 
// More: https://docs.tendermint.com/master/rpc/#/Info/header -func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { - height, err := getHeight(env.BlockStore.Height(), heightPtr) +func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -95,7 +104,7 @@ func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, erro // HeaderByHash gets header by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash -func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { +func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. @@ -110,9 +119,9 @@ func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHea // Block gets block at a given height. // If no height is provided, it will fetch the latest block. -// More: https://docs.tendermint.com/main/rpc/#/Info/block -func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - height, err := getHeight(env.BlockStore.Height(), heightPtr) +// More: https://docs.tendermint.com/master/rpc/#/Info/block +func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -126,8 +135,8 @@ func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) } // BlockByHash gets block by hash. 
-// More: https://docs.tendermint.com/main/rpc/#/Info/block_by_hash -func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash +func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { block := env.BlockStore.LoadBlockByHash(hash) if block == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil @@ -139,9 +148,9 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. -// More: https://docs.tendermint.com/main/rpc/#/Info/commit -func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - height, err := getHeight(env.BlockStore.Height(), heightPtr) +// More: https://docs.tendermint.com/master/rpc/#/Info/commit +func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -170,9 +179,9 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // Results are for the height of the block containing the txs. 
// Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] -// More: https://docs.tendermint.com/main/rpc/#/Info/block_results -func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { - height, err := getHeight(env.BlockStore.Height(), heightPtr) +// More: https://docs.tendermint.com/master/rpc/#/Info/block_results +func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -194,7 +203,7 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR // BlockSearch searches for a paginated set of blocks matching BeginBlock and // EndBlock event search criteria. -func BlockSearch( +func (env *Environment) BlockSearch( ctx *rpctypes.Context, query string, pagePtr, perPagePtr *int, @@ -230,7 +239,7 @@ func BlockSearch( // paginate results totalCount := len(results) - perPage := validatePerPage(perPagePtr) + perPage := env.validatePerPage(perPagePtr) page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 71311076c..5ea778fbd 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -80,7 +80,7 @@ func TestBlockResults(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, } - env = &Environment{} + env := &Environment{} env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -110,7 +110,7 @@ func TestBlockResults(t *testing.T) { } for _, tc := range testCases { - res, err := BlockResults(&rpctypes.Context{}, &tc.height) + res, err := env.BlockResults(&rpctypes.Context{}, &tc.height) if tc.wantErr { assert.Error(t, err) } else { diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 5237ddfc6..f51b44f22 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -14,10 
+14,14 @@ import ( // validators are sorted by their voting power - this is the canonical order // for the validators in the set as used in computing their Merkle root. // -// More: https://docs.tendermint.com/main/rpc/#/Info/validators -func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/validators +func (env *Environment) Validators( + ctx *rpctypes.Context, + heightPtr *int64, + pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + // The latest validator that we know is the NextValidator of the last block. - height, err := getHeight(latestUncommittedHeight(), heightPtr) + height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) if err != nil { return nil, err } @@ -28,7 +32,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in } totalCount := len(validators.Validators) - perPage := validatePerPage(perPagePtr) + perPage := env.validatePerPage(perPagePtr) page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { return nil, err @@ -47,8 +51,8 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in // DumpConsensusState dumps consensus state. // UNSTABLE -// More: https://docs.tendermint.com/main/rpc/#/Info/dump_consensus_state -func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state +func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. peers := env.P2PPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) @@ -80,8 +84,8 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState // ConsensusState returns a concise summary of the consensus state. 
// UNSTABLE -// More: https://docs.tendermint.com/main/rpc/#/Info/consensus_state -func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state +func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{RoundState: bz}, err @@ -89,11 +93,14 @@ func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. -// More: https://docs.tendermint.com/main/rpc/#/Info/consensus_params -func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params +func (env *Environment) ConsensusParams( + ctx *rpctypes.Context, + heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + // The latest consensus params that we know is the consensus params after the // last block. - height, err := getHeight(latestUncommittedHeight(), heightPtr) + height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/dev.go b/rpc/core/dev.go index b70f5f1e1..0e365cdcc 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -6,7 +6,7 @@ import ( ) // UnsafeFlushMempool removes all transactions from the mempool. 
-func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &ctypes.ResultUnsafeFlushMempool{}, nil } diff --git a/rpc/core/env.go b/rpc/core/env.go index e92319937..17cf45ff2 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -6,7 +6,6 @@ import ( "time" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" @@ -33,17 +32,6 @@ const ( genesisChunkSize = 16 * 1024 * 1024 // 16 ) -var ( - // set by Node - env *Environment -) - -// SetEnvironment sets up the given Environment. -// It will race if multiple Node call SetEnvironment. -func SetEnvironment(e *Environment) { - env = e -} - //---------------------------------------------- // These interfaces are used by RPC and must be thread safe @@ -69,6 +57,10 @@ type peers interface { Peers() p2p.IPeerSet } +type consensusReactor interface { + WaitSync() bool +} + // ---------------------------------------------- // Environment contains objects and interfaces used by the RPC. It is expected // to be setup once during startup. 
@@ -78,21 +70,21 @@ type Environment struct { ProxyAppMempool proxy.AppConnMempool // interfaces defined in types and above - StateStore sm.Store - BlockStore sm.BlockStore - EvidencePool sm.EvidencePool - ConsensusState Consensus - P2PPeers peers - P2PTransport transport + StateStore sm.Store + BlockStore sm.BlockStore + EvidencePool sm.EvidencePool + ConsensusState Consensus + ConsensusReactor consensusReactor + P2PPeers peers + P2PTransport transport // objects - PubKey crypto.PubKey - GenDoc *types.GenesisDoc // cache the genesis structure - TxIndexer txindex.TxIndexer - BlockIndexer indexer.BlockIndexer - ConsensusReactor *consensus.Reactor - EventBus *types.EventBus // thread safe - Mempool mempl.Mempool + PubKey crypto.PubKey + GenDoc *types.GenesisDoc // cache the genesis structure + TxIndexer txindex.TxIndexer + BlockIndexer indexer.BlockIndexer + EventBus *types.EventBus // thread safe + Mempool mempl.Mempool Logger log.Logger @@ -125,7 +117,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { return page, nil } -func validatePerPage(perPagePtr *int) int { +func (env *Environment) validatePerPage(perPagePtr *int) int { if perPagePtr == nil { // no per_page parameter return defaultPerPage } @@ -141,7 +133,7 @@ func validatePerPage(perPagePtr *int) int { // InitGenesisChunks configures the environment and should be called on service // startup. -func InitGenesisChunks() error { +func (env *Environment) InitGenesisChunks() error { if env.genChunks != nil { return nil } @@ -178,7 +170,7 @@ func validateSkipCount(page, perPage int) int { } // latestHeight can be either latest committed or uncommitted (+1) height. 
-func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { +func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { @@ -198,7 +190,7 @@ func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { return latestHeight, nil } -func latestUncommittedHeight() int64 { +func (env *Environment) latestUncommittedHeight() int64 { nodeIsSyncing := env.ConsensusReactor.WaitSync() if nodeIsSyncing { return env.BlockStore.Height() diff --git a/rpc/core/env_test.go b/rpc/core/env_test.go index b44c21a4c..dc64db1d6 100644 --- a/rpc/core/env_test.go +++ b/rpc/core/env_test.go @@ -70,13 +70,13 @@ func TestPaginationPerPage(t *testing.T) { {5, maxPerPage, maxPerPage}, {5, maxPerPage + 1, maxPerPage}, } - + env := &Environment{} for _, c := range cases { - p := validatePerPage(&c.perPage) + p := env.validatePerPage(&c.perPage) assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c)) } // nil case - p := validatePerPage(nil) + p := env.validatePerPage(nil) assert.Equal(t, defaultPerPage, p) } diff --git a/rpc/core/events.go b/rpc/core/events.go index e8d977363..63ed90fa7 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -19,8 +19,8 @@ const ( ) // Subscribe for events via WebSocket. 
-// More: https://docs.tendermint.com/main/rpc/#/Websocket/subscribe -func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { +// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe +func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { @@ -76,7 +76,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er return } } - case <-sub.Cancelled(): + case <-sub.Canceled(): if sub.Err() != tmpubsub.ErrUnsubscribed { var reason string if sub.Err() == nil { @@ -102,8 +102,8 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er } // Unsubscribe from events via WebSocket. -// More: https://docs.tendermint.com/main/rpc/#/Websocket/unsubscribe -func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { +// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe +func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) q, err := tmquery.New(query) @@ -118,8 +118,8 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe } // UnsubscribeAll from all events via WebSocket. 
-// More: https://docs.tendermint.com/main/rpc/#/Websocket/unsubscribe_all -func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { +// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all +func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) err := env.EventBus.UnsubscribeAll(context.Background(), addr) diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index d1b3753e7..0c5b5b725 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -10,8 +10,11 @@ import ( ) // BroadcastEvidence broadcasts evidence of the misbehavior. -// More: https://docs.tendermint.com/main/rpc/#/Info/broadcast_evidence -func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence +func (env *Environment) BroadcastEvidence( + ctx *rpctypes.Context, + ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + if ev == nil { return nil, errors.New("no evidence was provided") } diff --git a/rpc/core/health.go b/rpc/core/health.go index aaf1ceecf..855911d83 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -7,7 +7,7 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. -// More: https://docs.tendermint.com/main/rpc/#/Info/health -func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/health +func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 69b87dd5c..c0f7cd8dc 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -18,8 +18,8 @@ import ( // BroadcastTxAsync returns right away, with no response. 
Does not wait for // CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_async -func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) if err != nil { @@ -30,8 +30,8 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca // BroadcastTxSync returns with the response from CheckTx. Does not wait for // DeliverTx result. -// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_sync -func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync +func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := env.Mempool.CheckTx(tx, func(res *abci.Response) { select { @@ -60,8 +60,8 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas } // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. 
-// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_commit -func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit +func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := ctx.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { @@ -121,7 +121,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc Hash: tx.Hash(), Height: deliverTxRes.Height, }, nil - case <-deliverTxSub.Cancelled(): + case <-deliverTxSub.Canceled(): var reason string if deliverTxSub.Err() == nil { reason = "Tendermint exited" @@ -149,10 +149,10 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. -// More: https://docs.tendermint.com/main/rpc/#/Info/unconfirmed_txs -func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs +func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator - limit := validatePerPage(limitPtr) + limit := env.validatePerPage(limitPtr) txs := env.Mempool.ReapMaxTxs(limit) return &ctypes.ResultUnconfirmedTxs{ @@ -163,8 +163,8 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfi } // NumUnconfirmedTxs gets number of unconfirmed transactions. 
-// More: https://docs.tendermint.com/main/rpc/#/Info/num_unconfirmed_txs -func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs +func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), @@ -173,8 +173,8 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err // CheckTx checks the transaction without executing it. The transaction won't // be added to the mempool either. -// More: https://docs.tendermint.com/main/rpc/#/Tx/check_tx -func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx +func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { res, err := env.ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err diff --git a/rpc/core/net.go b/rpc/core/net.go index 27378bfed..688667203 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -11,8 +11,8 @@ import ( ) // NetInfo returns network info. -// More: https://docs.tendermint.com/main/rpc/#/Info/net_info -func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/net_info +func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { peersList := env.P2PPeers.Peers().List() peers := make([]ctypes.Peer, 0, len(peersList)) for _, peer := range peersList { @@ -39,7 +39,7 @@ func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { } // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). 
-func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided") } @@ -52,8 +52,11 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), // optionally making them persistent. -func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, unconditional, private bool) ( - *ctypes.ResultDialPeers, error) { +func (env *Environment) UnsafeDialPeers( + ctx *rpctypes.Context, + peers []string, + persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { + if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("no peers provided") } @@ -92,8 +95,8 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, uncondit } // Genesis returns genesis file. 
-// More: https://docs.tendermint.com/main/rpc/#/Info/genesis -func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/genesis +func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -101,7 +104,7 @@ func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go index c971776f3..8612e0c50 100644 --- a/rpc/core/net_test.go +++ b/rpc/core/net_test.go @@ -23,6 +23,7 @@ func TestUnsafeDialSeeds(t *testing.T) { } }) + env := &Environment{} env.Logger = log.TestingLogger() env.P2PPeers = sw @@ -36,7 +37,7 @@ func TestUnsafeDialSeeds(t *testing.T) { } for _, tc := range testCases { - res, err := UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) + res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) if tc.isErr { assert.Error(t, err) } else { @@ -62,6 +63,7 @@ func TestUnsafeDialPeers(t *testing.T) { } }) + env := &Environment{} env.Logger = log.TestingLogger() env.P2PPeers = sw @@ -76,7 +78,7 @@ func TestUnsafeDialPeers(t *testing.T) { } for _, tc := range testCases { - res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) + res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) if tc.isErr { assert.Error(t, err) } else { diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 
cd658889f..508bfa016 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -6,54 +6,58 @@ import ( // TODO: better system than "unsafe" prefix +type RoutesMap map[string]*rpc.RPCFunc + // Routes is a map of available routes. -var Routes = map[string]*rpc.RPCFunc{ - // subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), - "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"), - "unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""), +func (env *Environment) GetRoutes() RoutesMap { + return RoutesMap{ + // subscribe/unsubscribe are reserved for websocket events. + "subscribe": rpc.NewWSRPCFunc(env.Subscribe, "query"), + "unsubscribe": rpc.NewWSRPCFunc(env.Unsubscribe, "query"), + "unsubscribe_all": rpc.NewWSRPCFunc(env.UnsubscribeAll, ""), - // info API - "health": rpc.NewRPCFunc(Health, ""), - "status": rpc.NewRPCFunc(Status, ""), - "net_info": rpc.NewRPCFunc(NetInfo, ""), - "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), - "genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()), - "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()), - "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), - "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()), - "block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")), - "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), - "header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")), - "header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash", rpc.Cacheable()), - "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), - "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), - "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), - "block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"), - "validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")), - "dump_consensus_state": 
rpc.NewRPCFunc(DumpConsensusState, ""), - "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), - "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), - "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), - "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), + // info API + "health": rpc.NewRPCFunc(env.Health, ""), + "status": rpc.NewRPCFunc(env.Status, ""), + "net_info": rpc.NewRPCFunc(env.NetInfo, ""), + "blockchain": rpc.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), + "genesis": rpc.NewRPCFunc(env.Genesis, "", rpc.Cacheable()), + "genesis_chunked": rpc.NewRPCFunc(env.GenesisChunked, "chunk", rpc.Cacheable()), + "block": rpc.NewRPCFunc(env.Block, "height", rpc.Cacheable("height")), + "block_by_hash": rpc.NewRPCFunc(env.BlockByHash, "hash", rpc.Cacheable()), + "block_results": rpc.NewRPCFunc(env.BlockResults, "height", rpc.Cacheable("height")), + "commit": rpc.NewRPCFunc(env.Commit, "height", rpc.Cacheable("height")), + "header": rpc.NewRPCFunc(env.Header, "height", rpc.Cacheable("height")), + "header_by_hash": rpc.NewRPCFunc(env.HeaderByHash, "hash", rpc.Cacheable()), + "check_tx": rpc.NewRPCFunc(env.CheckTx, "tx"), + "tx": rpc.NewRPCFunc(env.Tx, "hash,prove", rpc.Cacheable()), + "tx_search": rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"), + "block_search": rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"), + "validators": rpc.NewRPCFunc(env.Validators, "height,page,per_page", rpc.Cacheable("height")), + "dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, ""), + "consensus_state": rpc.NewRPCFunc(env.GetConsensusState, ""), + "consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", rpc.Cacheable("height")), + "unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit"), + "num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, ""), - // tx broadcast API - "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), -
"broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), - "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), + // tx broadcast API + "broadcast_tx_commit": rpc.NewRPCFunc(env.BroadcastTxCommit, "tx"), + "broadcast_tx_sync": rpc.NewRPCFunc(env.BroadcastTxSync, "tx"), + "broadcast_tx_async": rpc.NewRPCFunc(env.BroadcastTxAsync, "tx"), - // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), - "abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()), + // abci API + "abci_query": rpc.NewRPCFunc(env.ABCIQuery, "path,data,height,prove"), + "abci_info": rpc.NewRPCFunc(env.ABCIInfo, "", rpc.Cacheable()), - // evidence API - "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"), + // evidence API + "broadcast_evidence": rpc.NewRPCFunc(env.BroadcastEvidence, "evidence"), + } } // AddUnsafeRoutes adds unsafe routes. -func AddUnsafeRoutes() { +func (env *Environment) AddUnsafeRoutes(routes RoutesMap) { // control API - Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds") - Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent,unconditional,private") - Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "") + routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds") + routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private") + routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "") } diff --git a/rpc/core/status.go b/rpc/core/status.go index a2a70d95d..2fd9bfe49 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -12,8 +12,8 @@ import ( // Status returns Tendermint status including node info, pubkey, latest block // hash, app hash, block height and time. 
-// More: https://docs.tendermint.com/main/rpc/#/Info/status -func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { +// More: https://docs.tendermint.com/master/rpc/#/Info/status +func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -47,7 +47,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { // Return the very last voting power, not the voting power of this validator // during the last block. var votingPower int64 - if val := validatorAtHeight(latestUncommittedHeight()); val != nil { + if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil { votingPower = val.VotingPower } @@ -74,12 +74,12 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { return result, nil } -func validatorAtHeight(h int64) *types.Validator { - vals, err := env.StateStore.LoadValidators(h) +func (env *Environment) validatorAtHeight(h int64) *types.Validator { + valsWithH, err := env.StateStore.LoadValidators(h) if err != nil { return nil } privValAddress := env.PubKey.Address() - _, val := vals.GetByAddress(privValAddress) + _, val := valsWithH.GetByAddress(privValAddress) return val } diff --git a/rpc/core/tx.go b/rpc/core/tx.go index aa14cab13..d7518752f 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -17,7 +17,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. 
// More: https://docs.tendermint.com/main/rpc/#/Info/tx -func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, fmt.Errorf("transaction indexing is disabled") @@ -51,7 +51,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error // TxSearch allows you to query for multiple transactions results. It returns a // list of transactions (maximum ?per_page entries) and the total count. // More: https://docs.tendermint.com/main/rpc/#/Info/tx_search -func TxSearch( +func (env *Environment) TxSearch( ctx *rpctypes.Context, query string, prove bool, @@ -98,7 +98,7 @@ func TxSearch( // paginate results totalCount := len(results) - perPage := validatePerPage(perPagePtr) + perPage := env.validatePerPage(perPagePtr) page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 62c6b66c1..41597dfd9 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -9,6 +9,7 @@ import ( ) type broadcastAPI struct { + env *core.Environment } func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { @@ -19,7 +20,7 @@ func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*Response func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { // NOTE: there's no way to get client's remote address // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go - res, err := core.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) + res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) if err != nil { return nil, err } diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 387a66213..57380c2c5 100644 --- 
a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -7,6 +7,7 @@ import ( "google.golang.org/grpc" tmnet "github.com/tendermint/tendermint/libs/net" + "github.com/tendermint/tendermint/rpc/core" ) // Config is an gRPC server configuration. @@ -17,16 +18,16 @@ type Config struct { // StartGRPCServer starts a new gRPC BroadcastAPIServer using the given // net.Listener. // NOTE: This function blocks - you may want to call it in a go-routine. -func StartGRPCServer(ln net.Listener) error { +func StartGRPCServer(env *core.Environment, ln net.Listener) error { grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{}) + RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) return grpcServer.Serve(ln) } // StartGRPCClient dials the gRPC server using protoAddr and returns a new // BroadcastAPIClient. func StartGRPCClient(protoAddr string) BroadcastAPIClient { - //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option. + //nolint: staticcheck // SA1019 Existing use of deprecated but supported dial option. 
conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { panic(err) diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index c322dfcea..d6d98e5fc 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -138,7 +138,7 @@ func setup() { wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) config := server.DefaultConfig() - listener1, err := server.Listen(tcpAddr, config) + listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections) if err != nil { panic(err) } @@ -154,7 +154,7 @@ func setup() { wm = server.NewWebsocketManager(Routes) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener2, err := server.Listen(unixAddr, config) + listener2, err := server.Listen(unixAddr, config.MaxOpenConnections) if err != nil { panic(err) } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 6dd772e3d..526692674 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -258,7 +258,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Listen starts a new net.Listener on the given address. // It returns an error if the address is invalid or the call to Listen() fails. 
-func Listen(addr string, config *Config) (listener net.Listener, err error) { +func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, fmt.Errorf( @@ -271,8 +271,8 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) { if err != nil { return nil, fmt.Errorf("failed to listen on %v: %v", addr, err) } - if config.MaxOpenConnections > 0 { - listener = netutil.LimitListener(listener, config.MaxOpenConnections) + if maxOpenConnections > 0 { + listener = netutil.LimitListener(listener, maxOpenConnections) } return listener, nil diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 72e873207..6d4ab385a 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -39,8 +39,7 @@ func TestMaxOpenConnections(t *testing.T) { fmt.Fprint(w, "some body") }) config := DefaultConfig() - config.MaxOpenConnections = max - l, err := Listen("tcp://127.0.0.1:0", config) + l, err := Listen("tcp://127.0.0.1:0", max) require.NoError(t, err) defer l.Close() go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index fe3ffb769..72583a43e 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -34,7 +34,7 @@ func main() { rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() - listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config) + listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { tmos.Exit(err.Error()) } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 510cf39d1..13c0c456a 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -911,6 +911,41 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /genesis_chunked: + 
get: + summary: Get Genesis in multiple chunks + operationId: genesis_chunked + tags: + - Info + description: | + Get genesis document in multiple chunks to make it easier to iterate + through larger genesis structures. Each chunk is produced by converting + the genesis document to JSON and then splitting the resulting payload + into 16MB blocks, and then Base64-encoding each block. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. + parameters: + - in: query + name: chunk + description: Sequence number of the chunk to download. + schema: + type: integer + default: 0 + example: 1 + responses: + "200": + description: Genesis chunk response. + content: + application/json: + schema: + $ref: "#/components/schemas/GenesisChunkedResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" /dump_consensus_state: get: summary: Get consensus state @@ -1975,6 +2010,35 @@ components: properties: {} type: object + GenesisChunkedResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: string + example: "2.0" + id: + type: integer + example: 0 + result: + required: + - "chunk" + - "total" + - "data" + properties: + chunk: + type: integer + example: 0 + total: + type: integer + example: 1 + data: + type: string + example: "Z2VuZXNpcwo=" + DumpConsensusResponse: type: object required: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 1b88dec51..14be9e22f 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -170,7 +170,7 @@ func NewTendermint(app abci.Application, opts *Options) *nm.Node { } node, err := nm.NewNode(config, pv, nodeKey, papp, nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, + cfg.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), logger) if err != nil { diff --git a/spec/abci/abci++_app_requirements.md b/spec/abci/abci++_app_requirements.md index 
e3f4877d9..4d2dc7011 100644 --- a/spec/abci/abci++_app_requirements.md +++ b/spec/abci/abci++_app_requirements.md @@ -518,7 +518,7 @@ all full nodes have the same value at a given height. #### List of Parameters -These are the current consensus parameters (as of v0.36.x): +These are the current consensus parameters (as of v0.37.x): 1. [BlockParams.MaxBytes](#blockparamsmaxbytes) 2. [BlockParams.MaxGas](#blockparamsmaxgas) diff --git a/state/execution.go b/state/execution.go index 234d81c0f..4ba630db7 100644 --- a/state/execution.go +++ b/state/execution.go @@ -93,7 +93,7 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // CreateProposalBlock calls state.MakeBlock with evidence from the evpool // and txs from the mempool. The max bytes must be big enough to fit the commit. -// Up to 1/10th of the block space is allcoated for maximum sized evidence. +// Up to 1/10th of the block space is allocated for maximum sized evidence. // The rest is given to txs, up to the max gas. // // Contract: application will not return more bytes than are sent over the wire. 
diff --git a/state/execution_test.go b/state/execution_test.go index 8e1c69d63..53a7c3f31 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -549,7 +549,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) } - case <-updatesSub.Cancelled(): + case <-updatesSub.Canceled(): t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) case <-time.After(1 * time.Second): t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") diff --git a/state/indexer/block/indexer.go b/state/indexer/block/indexer.go new file mode 100644 index 000000000..1e3b7e923 --- /dev/null +++ b/state/indexer/block/indexer.go @@ -0,0 +1,47 @@ +package block + +import ( + "errors" + "fmt" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/state/indexer" + blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" + blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" + "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/state/txindex/kv" + "github.com/tendermint/tendermint/state/txindex/null" +) + +// EventSinksFromConfig constructs a slice of indexer.EventSink using the provided +// configuration. 
+// +//nolint:lll +func IndexerFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) (txindex.TxIndexer, indexer.BlockIndexer, error) { + switch cfg.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&config.DBContext{ID: "tx_index", Config: cfg}) + if err != nil { + return nil, nil, err + } + + return kv.NewTxIndex(store), blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))), nil + + case "psql": + conn := cfg.TxIndex.PsqlConn + if conn == "" { + return nil, nil, errors.New("the psql connection settings cannot be empty") + } + es, err := psql.NewEventSink(cfg.TxIndex.PsqlConn, chainID) + if err != nil { + return nil, nil, fmt.Errorf("creating psql indexer: %w", err) + } + return es.TxIndexer(), es.BlockIndexer(), nil + + default: + return &null.TxIndex{}, &blockidxnull.BlockerIndexer{}, nil + } +} diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index d449f6711..ed921f4dc 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -28,6 +28,20 @@ func (_m *BlockStore) Base() int64 { return r0 } +// Close provides a mock function with given fields: +func (_m *BlockStore) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeleteLatestBlock provides a mock function with given fields: func (_m *BlockStore) DeleteLatestBlock() error { ret := _m.Called() diff --git a/state/services.go b/state/services.go index 0473b43b2..569242cd4 100644 --- a/state/services.go +++ b/state/services.go @@ -36,6 +36,8 @@ type BlockStore interface { LoadSeenCommit(height int64) *types.Commit DeleteLatestBlock() error + + Close() error } //----------------------------------------------------------------------------- diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 0e8fbb9c9..7c2738382 100644 --- a/state/txindex/indexer_service.go +++ 
b/state/txindex/indexer_service.go @@ -59,54 +59,59 @@ func (is *IndexerService) OnStart() error { go func() { for { - msg := <-blockHeadersSub.Out() - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) + select { + case <-blockHeadersSub.Canceled(): + return + case msg := <-blockHeadersSub.Out(): - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult + eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) + height := eventDataHeader.Header.Height + batch := NewBatch(eventDataHeader.NumTxs) - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) + for i := int64(0); i < eventDataHeader.NumTxs; i++ { + msg2 := <-txsSub.Out() + txResult := msg2.Data().(types.EventDataTx).TxResult + if err = batch.Add(&txResult); err != nil { + is.Logger.Error( + "failed to add tx to batch", + "height", height, + "index", txResult.Index, + "err", err, + ) + + if is.terminateOnError { + if err := is.Stop(); err != nil { + is.Logger.Error("failed to stop", "err", err) + } + return + } + } + } + + if err := is.blockIdxr.Index(eventDataHeader); err != nil { + is.Logger.Error("failed to index block", "height", height, "err", err) if is.terminateOnError { if err := is.Stop(); err != nil { is.Logger.Error("failed to stop", "err", err) } return } + } else { + is.Logger.Info("indexed block exents", "height", height) } - } - if err := is.blockIdxr.Index(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - if is.terminateOnError { - if err := is.Stop(); err != nil { - is.Logger.Error("failed to stop", "err", err) + if err = is.txIdxr.AddBatch(batch); err != nil { + is.Logger.Error("failed to index block txs", "height", height, "err", err) + if 
is.terminateOnError { + if err := is.Stop(); err != nil { + is.Logger.Error("failed to stop", "err", err) + } + return } - return + } else { + is.Logger.Debug("indexed transactions", "height", height, "num_txs", eventDataHeader.NumTxs) } - } else { - is.Logger.Info("indexed block exents", "height", height) - } - - if err = is.txIdxr.AddBatch(batch); err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) - if is.terminateOnError { - if err := is.Stop(); err != nil { - is.Logger.Error("failed to stop", "err", err) - } - return - } - } else { - is.Logger.Debug("indexed transactions", "height", height, "num_txs", eventDataHeader.NumTxs) } } }() diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 8bafe7ca9..7a82f8236 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -1,7 +1,7 @@ all: docker generator runner docker: - docker build --tag tendermint/e2e-node -f docker/Dockerfile ../.. + docker build --tag tendermint/e2e-node --tag tendermint/e2e-node:local-version -f docker/Dockerfile ../.. # We need to build support for database backends into the app in # order to build a binary with a Tendermint node in it (for built-in diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index c145e03d1..505aab716 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -88,7 +88,7 @@ func DefaultConfig(dir string) *Config { } // NewApplication creates the application. -func NewApplication(cfg *Config) (abci.Application, error) { +func NewApplication(cfg *Config) (*Application, error) { state, err := NewState(cfg.Dir, cfg.PersistInterval) if err != nil { return nil, err diff --git a/test/e2e/app/sync_app.go b/test/e2e/app/sync_app.go deleted file mode 100644 index 8df8da7a3..000000000 --- a/test/e2e/app/sync_app.go +++ /dev/null @@ -1,111 +0,0 @@ -package app - -import ( - "sync" - - abci "github.com/tendermint/tendermint/abci/types" -) - -// SyncApplication wraps an Application, managing its own synchronization. 
This -// allows it to be called from an unsynchronized local client, as it is -// implemented in a thread-safe way. -type SyncApplication struct { - mtx sync.RWMutex - app *Application -} - -var _ abci.Application = (*SyncApplication)(nil) - -func NewSyncApplication(cfg *Config) (abci.Application, error) { - app, err := NewApplication(cfg) - if err != nil { - return nil, err - } - return &SyncApplication{ - app: app.(*Application), - }, nil -} - -func (app *SyncApplication) Info(req abci.RequestInfo) abci.ResponseInfo { - app.mtx.RLock() - defer app.mtx.RUnlock() - return app.app.Info(req) -} - -func (app *SyncApplication) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.InitChain(req) -} - -func (app *SyncApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { - app.mtx.RLock() - defer app.mtx.RUnlock() - return app.app.CheckTx(req) -} - -func (app *SyncApplication) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { - // app.app.PrepareProposal does not modify state - app.mtx.RLock() - defer app.mtx.RUnlock() - return app.app.PrepareProposal(req) -} - -func (app *SyncApplication) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal { - // app.app.ProcessProposal does not modify state - app.mtx.RLock() - defer app.mtx.RUnlock() - return app.app.ProcessProposal(req) -} - -func (app *SyncApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.DeliverTx(req) -} - -func (app *SyncApplication) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.BeginBlock(req) -} - -func (app *SyncApplication) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.EndBlock(req) -} - -func (app *SyncApplication) Commit() abci.ResponseCommit { 
- app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.Commit() -} - -func (app *SyncApplication) Query(req abci.RequestQuery) abci.ResponseQuery { - app.mtx.RLock() - defer app.mtx.RUnlock() - return app.app.Query(req) -} - -func (app *SyncApplication) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.ApplySnapshotChunk(req) -} - -func (app *SyncApplication) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots { - // Calls app.snapshots.List(), which is thread-safe. - return app.app.ListSnapshots(req) -} - -func (app *SyncApplication) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk { - // Calls app.snapshots.LoadChunk, which is thread-safe. - return app.app.LoadSnapshotChunk(req) -} - -func (app *SyncApplication) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot { - app.mtx.Lock() - defer app.mtx.Unlock() - return app.app.OfferSnapshot(req) -} diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index a0c65305f..e9806afb5 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -22,12 +22,14 @@ var ( }, "validators": {"genesis", "initchain"}, } - + nodeVersions = weightedChoice{ + "": 2, + } // The following specify randomly chosen values for testnet nodes. 
nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} ipv6 = uniformChoice{false, true} // FIXME: grpc disabled due to https://github.com/tendermint/tendermint/issues/5439 - nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin"} // "grpc" + nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "builtin_unsync"} // "grpc" nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp"} nodeBlockSyncs = uniformChoice{"v0"} // "v2" nodeStateSyncs = uniformChoice{false, true} @@ -50,7 +52,10 @@ var ( ) // Generate generates random testnets using the given RNG. -func Generate(r *rand.Rand) ([]e2e.Manifest, error) { +func Generate(r *rand.Rand, multiversion string) ([]e2e.Manifest, error) { + if multiversion != "" { + nodeVersions[multiversion] = 1 + } manifests := []e2e.Manifest{} for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -105,7 +110,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( - r, e2e.ModeSeed, false, 0, manifest.InitialHeight, false) + r, e2e.ModeSeed, 0, manifest.InitialHeight, false) } // Next, we generate validators. 
We make sure a BFT quorum of validators start @@ -120,12 +125,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - syncApp := false - if manifest.ABCIProtocol == string(e2e.ProtocolBuiltin) { - syncApp = r.Intn(100) >= 50 - } manifest.Nodes[name] = generateNode( - r, e2e.ModeValidator, syncApp, startAt, manifest.InitialHeight, i <= 2) + r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) if startAt == 0 { (*manifest.Validators)[name] = int64(30 + r.Intn(71)) @@ -153,12 +154,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er startAt = nextStartAt nextStartAt += 5 } - syncApp := false - if manifest.ABCIProtocol == string(e2e.ProtocolBuiltin) { - syncApp = r.Intn(100) >= 50 - } manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( - r, e2e.ModeFull, syncApp, startAt, manifest.InitialHeight, false) + r, e2e.ModeFull, startAt, manifest.InitialHeight, false) } // We now set up peer discovery for nodes. Seed nodes are fully meshed with @@ -221,11 +218,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // here, since we need to know the overall network topology and startup // sequencing. 
func generateNode( - r *rand.Rand, mode e2e.Mode, syncApp bool, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ + Version: nodeVersions.Choose(r).(string), Mode: string(mode), - SyncApp: syncApp, StartAt: startAt, Database: nodeDatabases.Choose(r).(string), PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index d5b6ab639..16bcc1b58 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -44,25 +44,31 @@ func NewCLI() *CLI { if err != nil { return err } - return cli.generate(dir, groups) + multiversion, err := cmd.Flags().GetString("multi-version") + if err != nil { + return err + } + return cli.generate(dir, groups, multiversion) }, } cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests") _ = cli.root.MarkPersistentFlagRequired("dir") + cli.root.PersistentFlags().StringP("multi-version", "m", "", "Include multi-version testing."+ + "If multi-version is not specified, then only the current Tendermint version will be used in generated testnets.") cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") return cli } // generate generates manifests in a directory. 
-func (cli *CLI) generate(dir string, groups int) error { +func (cli *CLI) generate(dir string, groups int, multiversion string) error { err := os.MkdirAll(dir, 0o755) if err != nil { return err } - manifests, err := Generate(rand.New(rand.NewSource(randomSeed))) //nolint:gosec + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), multiversion) //nolint:gosec if err != nil { return err } diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index f21502118..4312eb30d 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -83,3 +83,25 @@ func (usc uniformSetChoice) Choose(r *rand.Rand) []string { } return choices } + +// weightedChoice chooses a single random key from a map of keys and weights. +type weightedChoice map[interface{}]uint + +func (wc weightedChoice) Choose(r *rand.Rand) interface{} { + total := 0 + choices := make([]interface{}, 0, len(wc)) + for choice, weight := range wc { + total += int(weight) + choices = append(choices, choice) + } + + rem := r.Intn(total) + for _, choice := range choices { + rem -= int(wc[choice]) + if rem <= 0 { + return choice + } + } + + return nil +} diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 67e088554..804012415 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -8,6 +8,8 @@ initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } prepare_proposal_delay = "100ms" process_proposal_delay = "100ms" check_tx_delay = "0ms" +# The most common case (e.g. Cosmos SDK-based chains). 
+abci_protocol = "builtin" [validators] validator01 = 100 @@ -41,7 +43,6 @@ perturb = ["disconnect"] [node.validator02] seeds = ["seed01"] database = "boltdb" -abci_protocol = "tcp" privval_protocol = "tcp" persist_interval = 0 perturb = ["restart"] @@ -49,8 +50,6 @@ perturb = ["restart"] [node.validator03] seeds = ["seed01"] database = "badgerdb" -# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439 -#abci_protocol = "grpc" privval_protocol = "unix" persist_interval = 3 retain_blocks = 10 @@ -59,8 +58,6 @@ perturb = ["kill"] [node.validator04] persistent_peers = ["validator01"] database = "rocksdb" -abci_protocol = "builtin" -sync_app = true perturb = ["pause"] [node.validator05] @@ -69,8 +66,6 @@ start_at = 1005 # Becomes part of the validator set at 1010 persistent_peers = ["validator01", "full01"] database = "cleveldb" mempool_version = "v1" -# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439 -#abci_protocol = "grpc" privval_protocol = "tcp" perturb = ["kill", "pause", "disconnect", "restart"] diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml index 05cda1819..96b81f79f 100644 --- a/test/e2e/networks/simple.toml +++ b/test/e2e/networks/simple.toml @@ -2,4 +2,3 @@ [node.validator02] [node.validator03] [node.validator04] - diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index a677ab12e..9faa9fe2d 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -7,7 +7,6 @@ import ( "github.com/BurntSushi/toml" "github.com/tendermint/tendermint/test/e2e/app" - e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Config is the application configuration. 
@@ -17,7 +16,6 @@ type Config struct { Protocol string `toml:"protocol"` Dir string `toml:"dir"` Mode string `toml:"mode"` - SyncApp bool `toml:"sync_app"` PersistInterval uint64 `toml:"persist_interval"` SnapshotInterval uint64 `toml:"snapshot_interval"` RetainBlocks uint64 `toml:"retain_blocks"` @@ -62,12 +60,8 @@ func (cfg Config) Validate() error { switch { case cfg.ChainID == "": return errors.New("chain_id parameter is required") - case cfg.Listen == "" && cfg.Protocol != "builtin": + case cfg.Listen == "" && cfg.Protocol != "builtin" && cfg.Protocol != "builtin_unsync": return errors.New("listen parameter is required") - case cfg.SyncApp && cfg.Protocol != string(e2e.ProtocolBuiltin): - return errors.New("sync_app parameter is only relevant for builtin applications") - case cfg.SyncApp && cfg.Mode != string(e2e.ModeFull) && cfg.Mode != string(e2e.ModeValidator): - return errors.New("sync_app parameter is only relevant to full nodes and validators") default: return nil } diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index c18878aad..232be0cab 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -62,7 +62,7 @@ func run(configFile string) error { if err = startSigner(cfg); err != nil { return err } - if cfg.Protocol == "builtin" { + if cfg.Protocol == "builtin" || cfg.Protocol == "builtin_unsync" { time.Sleep(1 * time.Second) } } @@ -71,7 +71,7 @@ func run(configFile string) error { switch cfg.Protocol { case "socket", "grpc": err = startApp(cfg) - case "builtin": + case "builtin", "builtin_unsync": if cfg.Mode == string(e2e.ModeLight) { err = startLightClient(cfg) } else { @@ -113,22 +113,9 @@ func startApp(cfg *Config) error { // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. 
func startNode(cfg *Config) error { - var cc proxy.ClientCreator - - if cfg.SyncApp { - app, err := app.NewSyncApplication(cfg.App()) - if err != nil { - return err - } - cc = proxy.NewUnsyncLocalClientCreator(app) - logger.Info("Using synchronized app with unsynchronized local client") - } else { - app, err := app.NewApplication(cfg.App()) - if err != nil { - return err - } - cc = proxy.NewLocalClientCreator(app) - logger.Info("Using regular app with synchronized (regular) local client") + app, err := app.NewApplication(cfg.App()) + if err != nil { + return err } tmcfg, nodeLogger, nodeKey, err := setupNode() @@ -136,12 +123,21 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } + var clientCreator proxy.ClientCreator + if cfg.Protocol == string(e2e.ProtocolBuiltinUnsync) { + clientCreator = proxy.NewUnsyncLocalClientCreator(app) + nodeLogger.Info("Using unsynchronized local client creator") + } else { + clientCreator = proxy.NewLocalClientCreator(app) + nodeLogger.Info("Using default (synchronized) local client creator") + } + n, err := node.NewNode(tmcfg, privval.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()), nodeKey, - cc, + clientCreator, node.DefaultGenesisDocProviderFunc(tmcfg), - node.DefaultDBProvider, + config.DefaultDBProvider, node.DefaultMetricsProvider(tmcfg.Instrumentation), nodeLogger, ) @@ -157,8 +153,8 @@ func startLightClient(cfg *Config) error { return err } - dbContext := &node.DBContext{ID: "light", Config: tmcfg} - lightDB, err := node.DefaultDBProvider(dbContext) + dbContext := &config.DBContext{ID: "light", Config: tmcfg} + lightDB, err := config.DefaultDBProvider(dbContext) if err != nil { return err } diff --git a/test/e2e/pkg/infra/docker/docker.go b/test/e2e/pkg/infra/docker/docker.go index bc33f8ab5..7145c14e2 100644 --- a/test/e2e/pkg/infra/docker/docker.go +++ b/test/e2e/pkg/infra/docker/docker.go @@ -28,7 +28,7 @@ func (p *Provider) Setup() error { } //nolint: 
gosec // G306: Expect WriteFile permissions to be 0600 or less - err = os.WriteFile(filepath.Join(p.Testnet.Dir, "docker-compose.yml"), compose, 0644) + err = os.WriteFile(filepath.Join(p.Testnet.Dir, "docker-compose.yml"), compose, 0o644) if err != nil { return err } @@ -78,9 +78,12 @@ services: labels: e2e: true container_name: {{ .Name }} - image: tendermint/e2e-node + image: tendermint/e2e-node:{{ .Version }} {{- if eq .ABCIProtocol "builtin" }} entrypoint: /usr/bin/entrypoint-builtin +{{- else }}{{ if eq .ABCIProtocol "builtin_unsync" }} + entrypoint: /usr/bin/entrypoint-builtin +{{- end }} {{- end }} init: true ports: diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index a91f21c63..5a88fb990 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -57,9 +57,15 @@ type Manifest struct { Evidence int `toml:"evidence"` // ABCIProtocol specifies the protocol used to communicate with the ABCI - // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. - // builtin will build a complete Tendermint node into the application and - // launch it instead of launching a separate Tendermint process. + // application: "unix", "tcp", "grpc", "builtin" or "builtin_unsync". + // + // Defaults to "builtin". "builtin" will build a complete Tendermint node + // into the application and launch it instead of launching a separate + // Tendermint process. + // + // "builtin_unsync" is basically the same as "builtin", except that it uses + // an "unsynchronized" local client creator, which attempts to replicate the + // same concurrency model locally as the socket client. 
ABCIProtocol string `toml:"abci_protocol"` // Add artificial delays to each of the main ABCI calls to mimic computation time @@ -68,6 +74,10 @@ type Manifest struct { ProcessProposalDelay time.Duration `toml:"process_proposal_delay"` CheckTxDelay time.Duration `toml:"check_tx_delay"` // TODO: add vote extension and finalize block delay (@cmwaters) + + LoadTxSizeBytes int `toml:"load_tx_size_bytes"` + LoadTxBatchSize int `toml:"load_tx_batch_size"` + LoadTxConnections int `toml:"load_tx_connections"` } // ManifestNode represents a node in a testnet manifest. @@ -77,14 +87,12 @@ type ManifestNode struct { // is generated), and seed nodes run in seed mode with the PEX reactor enabled. Mode string `toml:"mode"` - // SyncApp specifies whether this node should use a synchronized application - // with an unsynchronized local client. By default this is `false`, meaning - // that the node will run an unsynchronized application with a synchronized - // local client. - // - // Only applies to validators and full nodes where their ABCI protocol is - // "builtin". - SyncApp bool `toml:"sync_app"` + // Version specifies which version of Tendermint this node is. Specifying different + // versions for different nodes allows for testing the interaction of different + // node's compatibility. Note that in order to use a node at a particular version, + // there must be a docker image of the test app tagged with this version present + // on the machine where the test is being run. + Version string `toml:"version"` // Seeds is the list of node names to use as P2P seed nodes. Defaults to none. Seeds []string `toml:"seeds"` @@ -145,6 +153,11 @@ type ManifestNode struct { // pause: temporarily pauses (freezes) the node // restart: restarts the node, shutting it down with SIGTERM Perturb []string `toml:"perturb"` + + // SendNoLoad determines if the e2e test should send load to this node. + // It defaults to false so unless the configured, the node will + // receive load. 
+ SendNoLoad bool `toml:"send_no_load"` } // Save saves the testnet manifest to a file. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 65fa53119..508b78953 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -22,6 +22,10 @@ import ( const ( randomSeed int64 = 2308084734268 proxyPortFirst uint32 = 5701 + + defaultBatchSize = 2 + defaultConnections = 1 + defaultTxSizeBytes = 1024 ) type ( @@ -36,11 +40,12 @@ const ( ModeLight Mode = "light" ModeSeed Mode = "seed" - ProtocolBuiltin Protocol = "builtin" - ProtocolFile Protocol = "file" - ProtocolGRPC Protocol = "grpc" - ProtocolTCP Protocol = "tcp" - ProtocolUNIX Protocol = "unix" + ProtocolBuiltin Protocol = "builtin" + ProtocolBuiltinUnsync Protocol = "builtin_unsync" + ProtocolFile Protocol = "file" + ProtocolGRPC Protocol = "grpc" + ProtocolTCP Protocol = "tcp" + ProtocolUNIX Protocol = "unix" PerturbationDisconnect Perturbation = "disconnect" PerturbationKill Perturbation = "kill" @@ -64,6 +69,9 @@ type Testnet struct { Nodes []*Node KeyType string Evidence int + LoadTxSizeBytes int + LoadTxBatchSize int + LoadTxConnections int ABCIProtocol string PrepareProposalDelay time.Duration ProcessProposalDelay time.Duration @@ -73,9 +81,9 @@ type Testnet struct { // Node represents a Tendermint node in a testnet. type Node struct { Name string + Version string Testnet *Testnet Mode Mode - SyncApp bool // Should we use a synchronized app with an unsynchronized local client? PrivvalKey crypto.PrivKey NodeKey crypto.PrivKey IP net.IP @@ -93,6 +101,9 @@ type Node struct { Seeds []*Node PersistentPeers []*Node Perturbations []Perturbation + + // SendNoLoad determines if the e2e test should send load to this node. 
+ SendNoLoad bool } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -119,6 +130,9 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test ValidatorUpdates: map[int64]map[*Node]int64{}, Nodes: []*Node{}, Evidence: manifest.Evidence, + LoadTxSizeBytes: manifest.LoadTxSizeBytes, + LoadTxBatchSize: manifest.LoadTxBatchSize, + LoadTxConnections: manifest.LoadTxConnections, ABCIProtocol: manifest.ABCIProtocol, PrepareProposalDelay: manifest.PrepareProposalDelay, ProcessProposalDelay: manifest.ProcessProposalDelay, @@ -133,6 +147,15 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test if testnet.ABCIProtocol == "" { testnet.ABCIProtocol = string(ProtocolBuiltin) } + if testnet.LoadTxConnections == 0 { + testnet.LoadTxConnections = defaultConnections + } + if testnet.LoadTxBatchSize == 0 { + testnet.LoadTxBatchSize = defaultBatchSize + } + if testnet.LoadTxSizeBytes == 0 { + testnet.LoadTxSizeBytes = defaultTxSizeBytes + } // Set up nodes, in alphabetical order (IPs and ports get same order). 
nodeNames := []string{} @@ -147,15 +170,19 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test if !ok { return nil, fmt.Errorf("information for node '%s' missing from infrastucture data", name) } + v := nodeManifest.Version + if v == "" { + v = "local-version" + } node := &Node{ Name: name, + Version: v, Testnet: testnet, PrivvalKey: keyGen.Generate(manifest.KeyType), NodeKey: keyGen.Generate("ed25519"), IP: ind.IPAddress, ProxyPort: ind.Port, Mode: ModeValidator, - SyncApp: nodeManifest.SyncApp, Database: "goleveldb", ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, @@ -167,6 +194,7 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test SnapshotInterval: nodeManifest.SnapshotInterval, RetainBlocks: nodeManifest.RetainBlocks, Perturbations: []Perturbation{}, + SendNoLoad: nodeManifest.SendNoLoad, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -318,11 +346,11 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid database setting %q", n.Database) } switch n.ABCIProtocol { - case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: + case ProtocolBuiltin, ProtocolBuiltinUnsync, ProtocolUNIX, ProtocolTCP, ProtocolGRPC: default: return fmt.Errorf("invalid ABCI protocol setting %q", n.ABCIProtocol) } - if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin { + if n.Mode == ModeLight && n.ABCIProtocol != ProtocolBuiltin && n.ABCIProtocol != ProtocolBuiltinUnsync { return errors.New("light client must use builtin protocol") } switch n.PrivvalProtocol { diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 5f6d1e1ba..b4478bf6d 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -2,47 +2,45 @@ package main import ( "context" - "crypto/rand" "errors" "fmt" - "math" + "sync" "time" + "github.com/google/uuid" 
"github.com/tendermint/tendermint/libs/log" rpchttp "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/loadtime/payload" "github.com/tendermint/tendermint/types" ) +const workerPoolSize = 16 + // Load generates transactions against the network until the given context is -// canceled. A multiplier of greater than one can be supplied if load needs to -// be generated beyond a minimum amount. -func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error { - // Since transactions are executed across all nodes in the network, we need - // to reduce transaction load for larger networks to avoid using too much - // CPU. This gives high-throughput small networks and low-throughput large ones. - // This also limits the number of TCP connections, since each worker has - // a connection to all nodes. - concurrency := 64 / len(testnet.Nodes) - if concurrency == 0 { - concurrency = 1 - } +// canceled. +func Load(ctx context.Context, testnet *e2e.Testnet) error { initialTimeout := 1 * time.Minute stallTimeout := 30 * time.Second - - chTx := make(chan types.Tx) - chSuccess := make(chan types.Tx) + chSuccess := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() - // Spawn job generator and processors. 
- logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", concurrency)) + logger.Info("load", "msg", log.NewLazySprintf("Starting transaction load (%v workers)...", workerPoolSize)) started := time.Now() + u := [16]byte(uuid.New()) // generate run ID on startup - go loadGenerate(ctx, chTx, multiplier) + txCh := make(chan types.Tx) + go loadGenerate(ctx, txCh, testnet, u[:]) - for w := 0; w < concurrency; w++ { - go loadProcess(ctx, testnet, chTx, chSuccess) + for _, n := range testnet.Nodes { + if n.SendNoLoad { + continue + } + + for w := 0; w < testnet.LoadTxConnections; w++ { + go loadProcess(ctx, txCh, chSuccess, n) + } } // Monitor successful transactions, and abort on stalls. @@ -67,58 +65,85 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error { } // loadGenerate generates jobs until the context is canceled -func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int) { - for i := 0; i < math.MaxInt64; i++ { - // We keep generating the same 1000 keys over and over, with different values. - // This gives a reasonable load without putting too much data in the app. - id := i % 1000 - - bz := make([]byte, 1024) // 1kb hex-encoded - _, err := rand.Read(bz) - if err != nil { - panic(fmt.Sprintf("Failed to read random bytes: %v", err)) - } - tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz)) - +func loadGenerate(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testnet, id []byte) { + t := time.NewTimer(0) + defer t.Stop() + for { select { - case chTx <- tx: - time.Sleep(time.Second / time.Duration(multiplier)) - + case <-t.C: case <-ctx.Done(): - close(chTx) + close(txCh) return } + t.Reset(time.Second) + + // A context with a timeout is created here to time the createTxBatch + // function out. 
If createTxBatch has not completed its work by the time + // the next batch is set to be sent out, then the context is cancled so that + // the current batch is halted, allowing the next batch to begin. + tctx, cf := context.WithTimeout(ctx, time.Second) + createTxBatch(tctx, txCh, testnet, id) + cf() } } -// loadProcess processes transactions -func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) { - // Each worker gets its own client to each node, which allows for some - // concurrency while still bounding it. - clients := map[string]*rpchttp.HTTP{} +// createTxBatch creates new transactions and sends them into the txCh. createTxBatch +// returns when either a full batch has been sent to the txCh or the context +// is canceled. +func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testnet, id []byte) { + wg := &sync.WaitGroup{} + genCh := make(chan struct{}) + for i := 0; i < workerPoolSize; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for range genCh { + tx, err := payload.NewBytes(&payload.Payload{ + Id: id, + Size: uint64(testnet.LoadTxSizeBytes), + Rate: uint64(testnet.LoadTxBatchSize), + Connections: uint64(testnet.LoadTxConnections), + }) + if err != nil { + panic(fmt.Sprintf("Failed to generate tx: %v", err)) + } - var err error - for tx := range chTx { - node := testnet.RandomNode() - client, ok := clients[node.Name] - if !ok { - client, err = node.Client() - if err != nil { - continue + select { + case txCh <- tx: + case <-ctx.Done(): + return + } } - - // check that the node is up - _, err = client.Health(ctx) - if err != nil { - continue - } - - clients[node.Name] = client + }() + } + for i := 0; i < testnet.LoadTxBatchSize; i++ { + select { + case genCh <- struct{}{}: + case <-ctx.Done(): + break } + } + close(genCh) + wg.Wait() +} +// loadProcess processes transactions by sending transactions received on the txCh +// to the client. 
+func loadProcess(ctx context.Context, txCh <-chan types.Tx, chSuccess chan<- struct{}, n *e2e.Node) { + var client *rpchttp.HTTP + var err error + s := struct{}{} + for tx := range txCh { + if client == nil { + client, err = n.Client() + if err != nil { + logger.Info("non-fatal error creating node client", "error", err) + continue + } + } if _, err = client.BroadcastTxSync(ctx, tx); err != nil { continue } - chSuccess <- tx + chSuccess <- s } } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 3b7a6f3f8..099485a74 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -121,7 +121,7 @@ func NewCLI() *CLI { ctx, loadCancel := context.WithCancel(context.Background()) defer loadCancel() go func() { - err := Load(ctx, cli.testnet, 1) + err := Load(ctx, cli.testnet) if err != nil { logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error())) } @@ -232,20 +232,10 @@ func NewCLI() *CLI { }) cli.root.AddCommand(&cobra.Command{ - Use: "load [multiplier]", - Args: cobra.MaximumNArgs(1), + Use: "load", Short: "Generates transaction load until the command is canceled", RunE: func(cmd *cobra.Command, args []string) (err error) { - m := 1 - - if len(args) == 1 { - m, err = strconv.Atoi(args[0]) - if err != nil { - return err - } - } - - return Load(context.Background(), cli.testnet, m) + return Load(context.Background(), cli.testnet) }, }) @@ -314,7 +304,7 @@ func NewCLI() *CLI { Max Block Interval over a 100 block sampling period. -Does not run any perbutations. +Does not run any perturbations. `, RunE: func(cmd *cobra.Command, args []string) error { if err := Cleanup(cli.testnet); err != nil { @@ -328,9 +318,9 @@ Does not run any perbutations. 
ctx, loadCancel := context.WithCancel(context.Background()) defer loadCancel() go func() { - err := Load(ctx, cli.testnet, 1) + err := Load(ctx, cli.testnet) if err != nil { - logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error())) + logger.Error(fmt.Sprintf("Transaction load errored: %v", err.Error())) } chLoadResult <- err }() diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index c398bbe95..fda6f8129 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -174,7 +174,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { case e2e.ProtocolGRPC: cfg.ProxyApp = AppAddressTCP cfg.ABCI = "grpc" - case e2e.ProtocolBuiltin: + case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinUnsync: cfg.ProxyApp = "" cfg.ABCI = "" default: @@ -258,7 +258,6 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "dir": "data/app", "listen": AppAddressUNIX, "mode": node.Mode, - "sync_app": node.SyncApp, "proxy_port": node.ProxyPort, "protocol": "socket", "persist_interval": node.PersistInterval, @@ -277,9 +276,9 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { case e2e.ProtocolGRPC: cfg["listen"] = AppAddressTCP cfg["protocol"] = "grpc" - case e2e.ProtocolBuiltin: + case e2e.ProtocolBuiltin, e2e.ProtocolBuiltinUnsync: delete(cfg, "listen") - cfg["protocol"] = "builtin" + cfg["protocol"] = string(node.ABCIProtocol) default: return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) } diff --git a/types/event_bus.go b/types/event_bus.go index d51ae8e67..3efa008e2 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -23,7 +23,7 @@ type EventBusSubscriber interface { type Subscription interface { Out() <-chan tmpubsub.Message - Cancelled() <-chan struct{} //nolint: misspell + Canceled() <-chan struct{} Err() error } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 62f57fca6..09461d085 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -439,7 +439,7 @@ func 
benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes for { select { case <-sub.Out(): - case <-sub.Cancelled(): + case <-sub.Canceled(): return } }