diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 30218e098..7993419e8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,5 +7,4 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield - +* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ee6b0068f..12dd504e3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -46,7 +46,7 @@ jobs: with: go-version: "1.16" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -68,7 +68,7 @@ jobs: with: go-version: "1.16" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -96,7 +96,7 @@ jobs: needs: tests steps: - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -121,7 +121,7 @@ jobs: - run: | cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v2.0.1 + - uses: codecov/codecov-action@v2.0.2 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 7fc3cde7a..dd2b44da3 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -18,7 +18,7 @@ jobs: with: go-version: '1.16' - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 07392ae2b..c47dc4411 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -23,9 +23,14 @@ jobs: working-directory: test/fuzz run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - - name: Fuzz mempool + - name: Fuzz mempool-v1 working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1 + continue-on-error: true + + - name: Fuzz mempool-v0 + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0 continue-on-error: true - name: Fuzz p2p-addrbook diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index ccacb6eeb..e6bc45ec1 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.9.0 + - uses: styfle/cancel-workflow-action@0.9.1 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4b0092afc..79cb3685b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 8 steps: - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 929f0075e..1109f09c1 100644 --- a/.github/workflows/stale.yml +++ 
b/.github/workflows/stale.yml @@ -14,6 +14,7 @@ jobs: recent activity. It will be closed if no further activity occurs. Thank you for your contributions." days-before-stale: -1 + days-before-close: -1 days-before-pr-stale: 10 days-before-pr-close: 4 exempt-pr-labels: "S:wip" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 14bb8570b..14d531812 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -19,7 +19,7 @@ jobs: with: go-version: "1.16" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -51,7 +51,7 @@ jobs: with: go-version: "1.16" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -82,7 +82,7 @@ jobs: with: go-version: "1.16" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9c3add9b8..9de5b8bcb 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -24,12 +24,14 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated. - [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0 + - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - Apps - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`. - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. + - [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`. - It is not required any longer to set ldflags to set version strings - [abci/counter] \#6684 Delete counter example app @@ -71,6 +73,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) - [types] \#6627 Move `NodeKey` to types to make the type public. - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` + - [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` + (@cmwaters) - Blockchain Protocol @@ -81,6 +85,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - Tooling - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106) + - [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106) ### FEATURES diff --git a/Makefile b/Makefile index 871249f56..a509f3a26 100644 --- a/Makefile +++ b/Makefile @@ -237,7 +237,7 @@ build-docker: build-linux ############################################################################### mockery: - go generate -run="mockery" ./... 
+ go generate -run="./scripts/mockery_generate.sh" ./... .PHONY: mockery ############################################################################### diff --git a/UPGRADING.md b/UPGRADING.md index 6687f8e77..e53c34c29 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -17,22 +17,44 @@ This guide provides instructions for upgrading to specific versions of Tendermin ### Config Changes -* `fast_sync = "v1"` is no longer supported. Please use `v2` instead. +* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead. * All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure you have updated all the variables in your `config.toml` file. * Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`) [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) - + * `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike - persistent peers, there's no gaurantee that the node will remain connected with these peers. + persistent peers, there's no guarantee that the node will remain connected with these peers. -- configuration values starting with `priv-validator-` have moved to the new +* configuration values starting with `priv-validator-` have moved to the new `priv-validator` section, without the `priv-validator-` prefix. -* Fast Sync v2 has been deprecated, please use v0 to sync a node. +* The fast sync process, as well as the blockchain package and service, has + been renamed to block sync. + +### Key Format Changes + +The format of all tendermint on-disk database keys changes in +0.35. Upgrading nodes must either re-sync all data or run a migration +script provided in this release. The script located in +`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` +provides the function `Migrate(context.Context, db.DB)` which you can +operationalize as makes sense for your deployment (see the sketch at the +end of this section). + +For ease of use, the `tendermint` command includes a CLI version of the +migration script, which you can invoke, as in: + + tendermint key-migrate + +This reads the configuration file as normal and allows the +`--db-backend` and `--db-dir` flags to change database operations as +needed. + +The migration operation is idempotent and can be run more than once, +if needed. ### CLI Changes @@ -65,7 +87,7 @@ are: - `blockchain` - `evidence` -Accordingly, the space `node` package was changed to reduce access to +Accordingly, the `node` package was changed to reduce access to tendermint internals: applications that use tendermint as a library will need to change to accommodate these changes. Most notably: @@ -80,6 +102,16 @@ will need to change to accommodate these changes. Most notably: Mark gRPC in the RPC layer as deprecated and to be removed in 0.36. +### Support for Custom Reactor and Mempool Implementations + +The changes to the p2p layer removed existing support for custom +reactors. Based on our understanding of how this functionality was +used, the introduction of the prioritized mempool covers nearly all of +the use cases for custom reactors. If you are currently running custom +reactors and mempools and are having trouble seeing the migration path +for your project, please feel free to reach out to the Tendermint Core +development team directly.
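For reference, the following is a minimal sketch of driving `keymigrate.Migrate` from library code instead of the CLI. It mirrors what the new `key-migrate` command in this change does; the home directory path is a placeholder, and the list of database contexts may need adjusting for your deployment:

```go
package main

import (
	"context"
	"log"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	ctx := context.Background()

	conf := cfg.DefaultConfig()
	conf.SetRoot("/path/to/tendermint/home") // placeholder home directory

	// Migrate each on-disk database the node maintains; the blockstore and
	// state databases are typically the largest.
	for _, id := range []string{"blockstore", "state", "peerstore", "tx_index", "evidence", "light"} {
		db, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: id, Config: conf})
		if err != nil {
			log.Fatalf("opening %q database: %v", id, err)
		}

		if err := keymigrate.Migrate(ctx, db); err != nil {
			log.Fatalf("migrating %q database: %v", id, err)
		}
	}
}
```

Because the migration is idempotent, re-running this after a partial failure is safe.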
+ ## v0.34.0 **Upgrading to Tendermint 0.34 requires a blockchain restart.** @@ -233,8 +265,8 @@ Other user-relevant changes include: * The old `lite` package was removed; the new light client uses the `light` package. * The `Verifier` was broken up into two pieces: - * Core verification logic (pure `VerifyX` functions) - * `Client` object, which represents the complete light client + * Core verification logic (pure `VerifyX` functions) + * `Client` object, which represents the complete light client * The new light clients stores headers & validator sets as `LightBlock`s * The RPC client can be found in the `/rpc` directory. * The HTTP(S) proxy is located in the `/proxy` directory. @@ -366,12 +398,12 @@ Evidence Params has been changed to include duration. ### Go API * `libs/common` has been removed in favor of specific pkgs. - * `async` - * `service` - * `rand` - * `net` - * `strings` - * `cmap` + * `async` + * `service` + * `rand` + * `net` + * `strings` + * `cmap` * removal of `errors` pkg ### RPC Changes @@ -440,9 +472,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like: ```go abci.ResponseDeliverTx{ Tags: []kv.Pair{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, } } ``` @@ -461,14 +493,14 @@ the following `Events`: ```go abci.ResponseDeliverTx{ Events: []abci.Event{ - { - Type: "transfer", - Attributes: kv.Pairs{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, - }, - } + { + Type: "transfer", + Attributes: kv.Pairs{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, + }, + } } ``` @@ -516,9 +548,9 @@ In this case, the WS client will receive an error with description: "jsonrpc": "2.0", "id": "{ID}#event", "error": { - "code": -32000, - "msg": "Server error", - "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" + "code": -32000, + "msg": "Server error", + "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" } } @@ -724,9 +756,9 @@ just the `Data` field set: ```go []ProofOp{ - ProofOp{ - Data: , - } + ProofOp{ + Data: , + } } ``` diff --git a/abci/client/client.go b/abci/client/client.go index 1c25c6877..bb72d748b 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -15,7 +15,7 @@ const ( echoRetryIntervalSeconds = 1 ) -//go:generate mockery --case underscore --name Client +//go:generate ../../scripts/mockery_generate.sh Client // Client defines an interface for an ABCI client. // diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index caa377142..405d586f9 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks diff --git a/abci/types/messages.go b/abci/types/messages.go index ce23cca2b..8c17baeb3 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -15,11 +15,7 @@ const ( func WriteMessage(msg proto.Message, w io.Writer) error { protoWriter := protoio.NewDelimitedWriter(w) _, err := protoWriter.WriteMsg(msg) - if err != nil { - return err - } - - return nil + return err } // ReadMessage reads a varint length-delimited protobuf message. diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 03a835c56..0dbe461fd 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1908,17 +1908,19 @@ func (m *ResponseBeginBlock) GetEvents() []Event { } type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` + // mempool_error is set by Tendermint. + // ABCI applictions creating a ResponseCheckTX should not set mempool_error. 
+ MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go new file mode 100644 index 000000000..739af4a7d --- /dev/null +++ b/cmd/tendermint/commands/key_migrate.go @@ -0,0 +1,64 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/scripts/keymigrate" +) + +func MakeKeyMigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "key-migrate", + Short: "Run Database key migration", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + contexts := []string{ + // this is ordered to put the + // (presumably) biggest/most important + // subsets first. + "blockstore", + "state", + "peerstore", + "tx_index", + "evidence", + "light", + } + + for idx, dbctx := range contexts { + logger.Info("beginning a key migration", + "dbctx", dbctx, + "num", idx+1, + "total", len(contexts), + ) + + db, err := cfg.DefaultDBProvider(&cfg.DBContext{ + ID: dbctx, + Config: config, + }) + + if err != nil { + return fmt.Errorf("constructing database handle: %w", err) + } + + if err = keymigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running migration for context %q: %w", + dbctx, err) + } + } + + logger.Info("completed database migration successfully") + + return nil + }, + } + + // allow database info to be overridden via cli + addDBFlags(cmd) + + return cmd +} diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go new file mode 100644 index 000000000..ddc585c1f --- /dev/null +++ b/cmd/tendermint/commands/reindex_event.go @@ -0,0 +1,251 @@ +package commands + +import ( + "errors" + "fmt" + "strings" + + "github.com/spf13/cobra" + tmdb "github.com/tendermint/tm-db" + + abcitypes "github.com/tendermint/tendermint/abci/types" + tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/progressbar" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/state/indexer/sink/kv" + "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +const ( + reindexFailed = "event re-index failed: " +) + +// ReIndexEventCmd allows re-indexing events for a given block height interval +var ReIndexEventCmd = &cobra.Command{ + Use: "reindex-event", + Short: "reindex events to the event store backends", + Long: ` + reindex-event is an offline tool that re-indexes block and tx events to the event sinks. + You can run this command when the event store backend has been dropped/disconnected or you want to replace the backend. + The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); the + default end-height is 0, meaning the tool will re-index up to the latest block height (inclusive). Users can omit + either or both arguments.
+ `, + Example: ` + tendermint reindex-event + tendermint reindex-event --start-height 2 + tendermint reindex-event --end-height 10 + tendermint reindex-event --start-height 2 --end-height 10 + `, + Run: func(cmd *cobra.Command, args []string) { + bs, ss, err := loadStateAndBlockStore(config) + if err != nil { + fmt.Println(reindexFailed, err) + return + } + + if err := checkValidHeight(bs); err != nil { + fmt.Println(reindexFailed, err) + return + } + + es, err := loadEventSinks(config) + if err != nil { + fmt.Println(reindexFailed, err) + return + } + + if err = eventReIndex(cmd, es, bs, ss); err != nil { + fmt.Println(reindexFailed, err) + return + } + + fmt.Println("event re-index finished") + }, +} + +var ( + startHeight int64 + endHeight int64 +) + +func init() { + ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index") + ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index") +} + +func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { + // Check duplicated sinks. + sinks := map[string]bool{} + for _, s := range cfg.TxIndex.Indexer { + sl := strings.ToLower(s) + if sinks[sl] { + return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml") + } + sinks[sl] = true + } + + eventSinks := []indexer.EventSink{} + + for k := range sinks { + switch k { + case string(indexer.NULL): + return nil, errors.New("found null event sink, please check the tx-index section in the config.toml") + case string(indexer.KV): + store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg}) + if err != nil { + return nil, err + } + eventSinks = append(eventSinks, kv.NewEventSink(store)) + case string(indexer.PSQL): + conn := cfg.TxIndex.PsqlConn + if conn == "" { + return nil, errors.New("the psql connection settings cannot be empty") + } + es, _, err := psql.NewEventSink(conn, chainID) + if err != nil { + return nil, err + } + eventSinks = append(eventSinks, es) + default: + return nil, errors.New("unsupported event sink type") + } + } + + if len(eventSinks) == 0 { + return nil, errors.New("no proper event sink can do event re-indexing," + + " please check the tx-index section in the config.toml") + } + + if !indexer.IndexingEnabled(eventSinks) { + return nil, fmt.Errorf("no event sink has been enabled") + } + + return eventSinks, nil +} + +func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { + dbType := tmdb.BackendType(cfg.DBBackend) + + // Get BlockStore + blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir()) + if err != nil { + return nil, nil, err + } + blockStore := store.NewBlockStore(blockStoreDB) + + // Get StateStore + stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir()) + if err != nil { + return nil, nil, err + } + stateStore := state.NewStore(stateDB) + + return blockStore, stateStore, nil +} + +func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error { + + var bar progressbar.Bar + bar.NewOption(startHeight-1, endHeight) + + fmt.Println("start re-indexing events:") + defer bar.Finish() + for i := startHeight; i <= endHeight; i++ { + select { + case <-cmd.Context().Done(): + return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err()) + default: + b := bs.LoadBlock(i) + if b == nil { + return fmt.Errorf("not able to load block at height %d from the blockstore", i) + } + + 
r, err := ss.LoadABCIResponses(i) + if err != nil { + return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) + } + + e := types.EventDataNewBlockHeader{ + Header: b.Header, + NumTxs: int64(len(b.Txs)), + ResultBeginBlock: *r.BeginBlock, + ResultEndBlock: *r.EndBlock, + } + + var batch *indexer.Batch + if e.NumTxs > 0 { + batch = indexer.NewBatch(e.NumTxs) + + for i, tx := range b.Data.Txs { + tr := abcitypes.TxResult{ + Height: b.Height, + Index: uint32(i), + Tx: tx, + Result: *(r.DeliverTxs[i]), + } + + _ = batch.Add(&tr) + } + } + + for _, sink := range es { + if err := sink.IndexBlockEvents(e); err != nil { + return fmt.Errorf("block event re-index at height %d failed: %w", i, err) + } + + if batch != nil { + if err := sink.IndexTxEvents(batch.Ops); err != nil { + return fmt.Errorf("tx event re-index at height %d failed: %w", i, err) + } + } + } + } + + bar.Play(i) + } + + return nil +} + +func checkValidHeight(bs state.BlockStore) error { + base := bs.Base() + + if startHeight == 0 { + startHeight = base + fmt.Printf("set the start block height to the base height of the blockstore %d \n", base) + } + + if startHeight < base { + return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base) + } + + height := bs.Height() + + if startHeight > height { + return fmt.Errorf( + "%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height) + } + + if endHeight == 0 || endHeight > height { + endHeight = height + fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height) + } + + if endHeight < base { + return fmt.Errorf( + "%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base) + } + + if endHeight < startHeight { + return fmt.Errorf( + "%s (requested the end height: %d is less than the start height: %d)", + ctypes.ErrInvalidRequest, startHeight, endHeight) + } + + return nil +} diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go new file mode 100644 index 000000000..5d9459f5a --- /dev/null +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -0,0 +1,171 @@ +package commands + +import ( + "context" + "errors" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abcitypes "github.com/tendermint/tendermint/abci/types" + tmcfg "github.com/tendermint/tendermint/config" + prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" + "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/state/mocks" + "github.com/tendermint/tendermint/types" +) + +const ( + height int64 = 10 + base int64 = 2 +) + +func setupReIndexEventCmd() *cobra.Command { + reIndexEventCmd := &cobra.Command{ + Use: ReIndexEventCmd.Use, + Run: func(cmd *cobra.Command, args []string) {}, + } + + _ = reIndexEventCmd.ExecuteContext(context.Background()) + + return reIndexEventCmd +} + +func TestReIndexEventCheckHeight(t *testing.T) { + mockBlockStore := &mocks.BlockStore{} + mockBlockStore. + On("Base").Return(base). 
+ On("Height").Return(height) + + testCases := []struct { + startHeight int64 + endHeight int64 + validHeight bool + }{ + {0, 0, true}, + {0, base, true}, + {0, base - 1, false}, + {0, height, true}, + {0, height + 1, true}, + {0, 0, true}, + {base - 1, 0, false}, + {base, 0, true}, + {base, base, true}, + {base, base - 1, false}, + {base, height, true}, + {base, height + 1, true}, + {height, 0, true}, + {height, base, false}, + {height, height - 1, false}, + {height, height, true}, + {height, height + 1, true}, + {height + 1, 0, false}, + } + + for _, tc := range testCases { + startHeight = tc.startHeight + endHeight = tc.endHeight + + err := checkValidHeight(mockBlockStore) + if tc.validHeight { + require.NoError(t, err) + } else { + require.Error(t, err) + } + } +} + +func TestLoadEventSink(t *testing.T) { + testCases := []struct { + sinks []string + connURL string + loadErr bool + }{ + {[]string{}, "", true}, + {[]string{"NULL"}, "", true}, + {[]string{"KV"}, "", false}, + {[]string{"KV", "KV"}, "", true}, + {[]string{"PSQL"}, "", true}, // true because empty connect url + {[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url + // skip to test PSQL connect with correct url + {[]string{"UnsupportedSinkType"}, "wrongUrl", true}, + } + + for _, tc := range testCases { + cfg := tmcfg.TestConfig() + cfg.TxIndex.Indexer = tc.sinks + cfg.TxIndex.PsqlConn = tc.connURL + _, err := loadEventSinks(cfg) + if tc.loadErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + } +} + +func TestLoadBlockStore(t *testing.T) { + bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig()) + require.NoError(t, err) + require.NotNil(t, bs) + require.NotNil(t, ss) + +} +func TestReIndexEvent(t *testing.T) { + mockBlockStore := &mocks.BlockStore{} + mockStateStore := &mocks.Store{} + mockEventSink := &mocks.EventSink{} + + mockBlockStore. + On("Base").Return(base). + On("Height").Return(height). + On("LoadBlock", base).Return(nil).Once(). + On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}). + On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}) + + mockEventSink. + On("Type").Return(indexer.KV). + On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once(). + On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil). + On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once(). + On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil) + + dtx := abcitypes.ResponseDeliverTx{} + abciResp := &prototmstate.ABCIResponses{ + DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx}, + EndBlock: &abcitypes.ResponseEndBlock{}, + BeginBlock: &abcitypes.ResponseBeginBlock{}, + } + + mockStateStore. + On("LoadABCIResponses", base).Return(nil, errors.New("")).Once(). + On("LoadABCIResponses", base).Return(abciResp, nil). 
+ On("LoadABCIResponses", height).Return(abciResp, nil) + + testCases := []struct { + startHeight int64 + endHeight int64 + reIndexErr bool + }{ + {base, height, true}, // LoadBlock error + {base, height, true}, // LoadABCIResponses error + {base, height, true}, // index block event error + {base, height, true}, // index tx event error + {base, base, false}, + {height, height, false}, + } + + for _, tc := range testCases { + startHeight = tc.startHeight + endHeight = tc.endHeight + + err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore) + if tc.reIndexErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + } +} diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 1c68fcffe..97d6197a2 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -83,7 +83,10 @@ func AddNodeFlags(cmd *cobra.Command) { config.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") - // db flags + addDBFlags(cmd) +} + +func addDBFlags(cmd *cobra.Command) { cmd.Flags().String( "db-backend", config.DBBackend, diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 52a26b4f3..b092f5645 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -15,6 +15,7 @@ func main() { rootCmd := cmd.RootCmd rootCmd.AddCommand( cmd.GenValidatorCmd, + cmd.ReIndexEventCmd, cmd.InitFilesCmd, cmd.ProbeUpnpCmd, cmd.LightCmd, @@ -27,6 +28,7 @@ func main() { cmd.ShowNodeIDCmd, cmd.GenNodeKeyCmd, cmd.VersionCmd, + cmd.MakeKeyMigrateCommand(), debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), ) diff --git a/config/config.go b/config/config.go index 99b82fb46..7d19616aa 100644 --- a/config/config.go +++ b/config/config.go @@ -29,8 +29,8 @@ const ( ModeValidator = "validator" ModeSeed = "seed" - BlockchainV0 = "v0" - BlockchainV2 = "v2" + BlockSyncV0 = "v0" + BlockSyncV2 = "v2" MempoolV0 = "v0" MempoolV1 = "v1" @@ -76,7 +76,7 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - FastSync *FastSyncConfig `mapstructure:"fastsync"` + BlockSync *BlockSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -91,7 +91,7 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), StateSync: DefaultStateSyncConfig(), - FastSync: DefaultFastSyncConfig(), + BlockSync: DefaultBlockSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), @@ -114,7 +114,7 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), StateSync: TestStateSyncConfig(), - FastSync: TestFastSyncConfig(), + BlockSync: TestBlockSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), @@ -151,7 +151,7 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.StateSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [statesync] section: %w", err) } - if err := cfg.FastSync.ValidateBasic(); err != nil { + if err := cfg.BlockSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [fastsync] section: %w", err) } if err := cfg.Consensus.ValidateBasic(); err != nil { @@ -197,6 +197,7 @@ type BaseConfig 
struct { //nolint: maligned // If this node is many blocks behind the tip of the chain, FastSync // allows them to catchup quickly by downloading blocks in parallel // and verifying their commits + // TODO: This should be moved to the blocksync config FastSyncMode bool `mapstructure:"fast-sync"` // Database backend: goleveldb | cleveldb | boltdb | rocksdb @@ -911,7 +912,7 @@ func DefaultStateSyncConfig() *StateSyncConfig { } } -// TestFastSyncConfig returns a default configuration for the state sync service +// TestStateSyncConfig returns a default configuration for the state sync service func TestStateSyncConfig() *StateSyncConfig { return DefaultStateSyncConfig() } @@ -967,34 +968,33 @@ func (cfg *StateSyncConfig) ValidateBasic() error { } //----------------------------------------------------------------------------- -// FastSyncConfig -// FastSyncConfig defines the configuration for the Tendermint fast sync service -type FastSyncConfig struct { +// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service +type BlockSyncConfig struct { Version string `mapstructure:"version"` } -// DefaultFastSyncConfig returns a default configuration for the fast sync service -func DefaultFastSyncConfig() *FastSyncConfig { - return &FastSyncConfig{ - Version: BlockchainV0, +// DefaultBlockSyncConfig returns a default configuration for the block sync service +func DefaultBlockSyncConfig() *BlockSyncConfig { + return &BlockSyncConfig{ + Version: BlockSyncV0, } } -// TestFastSyncConfig returns a default configuration for the fast sync. -func TestFastSyncConfig() *FastSyncConfig { - return DefaultFastSyncConfig() +// TestBlockSyncConfig returns a default configuration for the block sync. +func TestBlockSyncConfig() *BlockSyncConfig { + return DefaultBlockSyncConfig() } // ValidateBasic performs basic validation. -func (cfg *FastSyncConfig) ValidateBasic() error { +func (cfg *BlockSyncConfig) ValidateBasic() error { switch cfg.Version { - case BlockchainV0: + case BlockSyncV0: return nil - case BlockchainV2: - return errors.New("fastsync version v2 is no longer supported. Please use v0") + case BlockSyncV2: + return errors.New("blocksync version v2 is no longer supported. 
Please use v0") default: - return fmt.Errorf("unknown fastsync version %s", cfg.Version) + return fmt.Errorf("unknown blocksync version %s", cfg.Version) } } diff --git a/config/config_test.go b/config/config_test.go index 9801b75cd..075cedc6a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -125,8 +125,8 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) } -func TestFastSyncConfigValidateBasic(t *testing.T) { - cfg := TestFastSyncConfig() +func TestBlockSyncConfigValidateBasic(t *testing.T) { + cfg := TestBlockSyncConfig() assert.NoError(t, cfg.ValidateBasic()) // tamper with version diff --git a/config/toml.go b/config/toml.go index aae716a58..edb192109 100644 --- a/config/toml.go +++ b/config/toml.go @@ -452,14 +452,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}" fetchers = "{{ .StateSync.Fetchers }}" ####################################################### -### Fast Sync Configuration Connections ### +### Block Sync Configuration Connections ### ####################################################### [fastsync] -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation +# Block Sync version to use: +# 1) "v0" (default) - the legacy block sync implementation # 2) "v2" - DEPRECATED, please use v0 -version = "{{ .FastSync.Version }}" +version = "{{ .BlockSync.Version }}" ####################################################### ### Consensus Configuration Options ### diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 2994e8048..80b289d23 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -204,7 +204,10 @@ func (spn *ProofNode) FlattenAunts() [][]byte { case spn.Right != nil: innerHashes = append(innerHashes, spn.Right.Hash) default: - break + // FIXME(fromberger): Per the documentation above, exactly one of + // these fields should be set. If that is true, this should probably + // be a panic since it violates the invariant. If not, when can it + // be OK to have no siblings? Does this occur at the leaves? } spn = spn.Parent } diff --git a/docs/architecture/README.md b/docs/architecture/README.md index a4e326274..7025a72f6 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -97,3 +97,4 @@ Note the context/background should be written in the present tense. - [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) - [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md) - [ADR-057: RPC](./adr-057-RPC.md) +- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) diff --git a/docs/architecture/adr-069-flexible-node-intitalization.md b/docs/architecture/adr-069-flexible-node-intitalization.md new file mode 100644 index 000000000..ec66725be --- /dev/null +++ b/docs/architecture/adr-069-flexible-node-intitalization.md @@ -0,0 +1,273 @@ +# ADR 069: Flexible Node Initialization + +## Changlog + +- 2021-06-09: Initial Draft (@tychoish) + +- 2021-07-21: Major Revision (@tychoish) + +## Status + +Proposed. + +## Context + +In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md), +during the 0.35 development cycle, we have attempted to reduce the the API +surface area by moving most of the interface of the `node` package into +unexported functions, as well as moving the reactors to an `internal` +package. 
Having this coincide with the 0.35 release made a lot of sense +because these interfaces were _already_ changing as a result of the `p2p` +[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit +more about how tendermint exposes this API. + +While the interfaces of the P2P layer and most of the node package are already +internalized, this precludes some operational patterns that are important to +users who use tendermint as a library. Specifically, introspecting the +tendermint node service and replacing components is not supported in the latest +version of the code, and some of these use cases would require maintaining a +vendor copy of the code. Adding these features requires rather extensive +(internal/implementation) changes to the `node` and `rpc` packages, and this +ADR describes a model for changing the way that tendermint nodes initialize, in +service of providing this kind of functionality. + +We consider node initialization because the current implementation +provides strong connections between all components, as well as between +the components of the node and the RPC layer, and being able to think +about the interactions of these components will help enable these +features and help define the requirements of the node package. + +## Alternative Approaches + +These alternatives are presented to frame the design space and to +contextualize the decision in terms of product requirements. These +ideas are not inherently bad, and may even be possible or desirable +in the (distant) future, and merely provide additional context for how +we, in the moment, came to our decision(s). + +### Do Nothing + +The current implementation is functional and sufficient for the vast +majority of use cases (e.g., all users of the Cosmos-SDK as well as +anyone who runs tendermint and the ABCI application in separate +processes). In the current implementation, and even previous versions, +modifying node initialization or injecting custom components required +copying most of the `node` package, which required such users +to maintain a vendored copy of tendermint. + +While this is (likely) not tenable in the long term, as users do want +more modularity, and the current service implementation is brittle and +difficult to maintain, in the short term it may be possible to delay +implementation somewhat. Eventually, however, we will need to make the +`node` package easier to maintain and reason about. + +### Generic Service Pluggability + +One possible system design would export interfaces (in the Golang +sense) for all components of the system, to permit runtime dependency +injection of all components in the system, so that users can compose +tendermint nodes of arbitrary user-supplied components. + +Although this level of customization would provide benefits, it would be a huge +undertaking (particularly with regards to API design work) that we do not have +scope for at the moment. Eventually providing support for some kinds of +pluggability may be useful, so the current solution does not explicitly +foreclose the possibility of this alternative. + +### Abstract Dependency Based Startup and Shutdown + +The main proposal in this document makes tendermint node initialization simpler +and more abstract, but the system lacks a number of +features which daemon/service initialization could provide, such as a +system allowing the authors of services to control initialization and shutdown order +of components using dependency relationships.
+ +Such a system could work by allowing services to declare +initialization order dependencies to other reactors (by ID, perhaps) +so that the node could decide the initialization based on the +dependencies declared by services rather than requiring the node to +encode this logic directly. + +This level of configuration is probably more complicated than is needed. Given +that the authors of components in the current implementation of tendermint +already *do* need to know about other components, a dependency-based system +would probably be overly-abstract at this stage. + +## Decisions + +- To the greatest extent possible, factor the code base so that + packages are responsible for their own initialization, and minimize + the amount of code in the `node` package itself. + +- As a design goal, reduce direct coupling and dependencies between + components in the implementation of `node`. + +- Begin iterating on a more-flexible internal framework for + initializing tendermint nodes to make the initialization process + less hard-coded by the implementation of the node objects. + + - Reactors should not need to expose their interfaces *within* the + implementation of the node type. + + - This refactoring should be entirely opaque to users. + + - These node initialization changes should not require a + reevaluation of the `service.Service` or a generic initialization + orchestration framework. + +- Do not proactively provide a system for injecting + components/services within a tendermint node, though make it + possible to retrofit this kind of pluggability in the future if + needed. + +- Prioritize implementation of p2p-based statesync reactor to obviate the + need for users to inject a custom state-sync provider. + +## Detailed Design + +The [current +nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47) +includes direct references to the implementations of each of the +reactors, which should be replaced by references to `service.Service` +objects. This will require moving construction of the [rpc +service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771) +into the constructor of +[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One +possible implementation of this would be to eliminate the current +`ConfigureRPC` method on the node package and instead [configure it +here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441). + +To avoid adding complexity to the `node` package, we will add a +composite service implementation to the `service` package +that implements `service.Service` and is composed of a sequence of +underlying `service.Service` objects and handles their +startup/shutdown in the specified sequential order. A rough sketch of +this composite appears just before the references at the end of this ADR. + +Consensus, blocksync (*née* fast sync), and statesync all depend on +each other, and have significant initialization dependencies that are +presently encoded in the `node` package. As part of this change, a +new package/component (likely named `blocks` located at +`internal/blocks`) will encapsulate the initialization of these block +management areas of the code. + +### Injectable Component Option + +This section briefly describes a possible implementation for +user-supplied services running within a node. This should not be +implemented unless user-supplied components are a hard requirement for +a user.
+ +In order to allow components to be replaced, a new public function +will be added to the public interface of `node` with a signature that +resembles the following: + +```go +func NewWithServices(conf *config.Config, + logger log.Logger, + cf proxy.ClientCreator, + gen *types.GenesisDoc, + srvs []service.Service, +) (service.Service, error) { +``` + +The `service.Service` objects will be initialized in the order supplied, after +all pre-configured/default services have started (and shut down in reverse +order). The given services may implement additional interfaces, allowing them +to replace specific default services. `NewWithServices` will validate input +service lists with the following rules: + +- None of the services may already be running. +- The caller may not supply more than one replacement reactor for a given + default service type. + +If callers violate any of these rules, `NewWithServices` will return +an error. To retract support for this kind of operation in the future, +the function can be modified to *always* return an error. + +## Consequences + +### Positive + +- The node package will become easier to maintain. + +- It will become easier to add additional services within tendermint + nodes. + +- It will become possible to replace default components in the node + package without vendoring the tendermint repo and modifying internal + code. + +- The current end-to-end (e2e) test suite will be able to prevent any + regressions, and the new functionality can be thoroughly unit tested. + +- The scope of this project is very narrow, which minimizes risk. + +### Negative + +- This increases our reliance on the `service.Service` interface which + is probably not an interface that we want to fully commit to. + +- This proposal implements a fairly minimal set of functionality and + leaves open the possibility for many additional features which are + not included in the scope of this proposal. + +### Neutral + +N/A + +## Open Questions + +- To what extent does this new initialization framework need to accommodate + the legacy p2p stack? Would it be possible to delay a great deal of this + work to the 0.36 cycle to avoid this complexity? + + - Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35. + +- Where should additional public types be exported for the 0.35 + release? + + Related to the general project of API stabilization we want to deprecate + the `types` package, and move its contents into a new `pkg` hierarchy; + however, the design of the `pkg` interface is currently underspecified. + If `types` is going to remain for the 0.35 release, then we should consider + the impact of using multiple organizing modalities for this code within a + single release. + +## Future Work + +- Improve or simplify the `service.Service` interface. There are some + pretty clear limitations with this interface as written (there's no + way to timeout slow startup or shut down, the cycle between the + `service.BaseService` and `service.Service` implementations is + troubling, the default panic in `OnReset` seems troubling.) + +- As part of the refactor of `service.Service` have all services/nodes + respect the lifetime of a `context.Context` object, and avoid the + current practice of creating `context.Context` objects in p2p and + reactor code. This would be required for in-process multi-tenancy. + +- Support explicit dependencies between components and allow for + parallel startup, so that different reactors can startup at the same + time, where possible. 
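To make the composite-service idea from the Detailed Design section concrete, here is a rough sketch of sequential startup with reverse-order shutdown. It deliberately uses a simplified local interface rather than the real `service.Service` (which carries additional methods such as `Reset`, `Quit`, and logger wiring), so it illustrates the intended behavior rather than the final API:

```go
package service

import "fmt"

// runner is a simplified stand-in for service.Service, used here only to
// illustrate ordered startup and shutdown.
type runner interface {
	Start() error
	Stop() error
	String() string
}

// composite starts its members in order and stops them in reverse order.
type composite struct {
	members []runner
}

// Start starts each member in sequence, unwinding any already-started
// members if one of them fails.
func (c *composite) Start() error {
	for i, s := range c.members {
		if err := s.Start(); err != nil {
			for j := i - 1; j >= 0; j-- {
				_ = c.members[j].Stop()
			}
			return fmt.Errorf("starting %s: %w", s.String(), err)
		}
	}
	return nil
}

// Stop stops members in reverse order, returning the first error seen.
func (c *composite) Stop() error {
	var firstErr error
	for i := len(c.members) - 1; i >= 0; i-- {
		if err := c.members[i].Stop(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
```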
+ +## References + +- [this + branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize) + contains experimental work in the implementation of the node package + to unwind some of the hard dependencies between components. + +- [the component + graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph) + as a framing for internal service construction. + +## Appendix + +### Dependencies + +The relationship between the blockchain and consensus reactors, +described by the following dependency graph, makes replacing some of +these components more difficult relative to other reactors or +components. + +![consensus blockchain dependency graph](./img/consensus_blockchain.png) diff --git a/docs/architecture/img/consensus_blockchain.png b/docs/architecture/img/consensus_blockchain.png new file mode 100644 index 000000000..dd0f4daa8 Binary files /dev/null and b/docs/architecture/img/consensus_blockchain.png differ diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index 6e2665b26..b5259f93f 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -36,7 +36,7 @@ proxy-app = "tcp://127.0.0.1:26658" # A custom human readable name for this node moniker = "anonymous" -# If this node is many blocks behind the tip of the chain, FastSync +# If this node is many blocks behind the tip of the chain, BlockSync # allows them to catchup quickly by downloading blocks in parallel # and verifying their commits fast-sync = true @@ -354,12 +354,12 @@ discovery-time = "15s" temp-dir = "" ####################################################### -### Fast Sync Configuration Connections ### +### BlockSync Configuration Connections ### ####################################################### [fastsync] -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation +# Block Sync version to use: +# 1) "v0" (default) - the legacy block sync implementation # 2) "v2" - complete redesign of v0, optimized for testability & readability version = "v0" diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index 666eff16d..fb359a08b 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint. - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) -- [Fast Sync](./fast-sync.md) +- [Block Sync](./block-sync.md) - [State Sync](./state-sync.md) - [Mempool](./mempool.md) - [Light Client](./light-client.md) diff --git a/docs/tendermint-core/fast-sync.md b/docs/tendermint-core/block-sync.md similarity index 79% rename from docs/tendermint-core/fast-sync.md rename to docs/tendermint-core/block-sync.md index afc668277..9d362424f 100644 --- a/docs/tendermint-core/fast-sync.md +++ b/docs/tendermint-core/block-sync.md @@ -2,7 +2,8 @@ order: 10 --- -# Fast Sync +# Block Sync +*Formerly known as Fast Sync* In a proof of work blockchain, syncing with the chain is the same process as staying up-to-date with the consensus: download blocks, and @@ -14,7 +15,7 @@ scratch can take a very long time. It's much faster to just download blocks and check the merkle tree of validators than to run the real-time consensus gossip protocol.
-## Using Fast Sync +## Using Block Sync To support faster syncing, Tendermint offers a `fast-sync` mode, which is enabled by default, and can be toggled in the `config.toml` or via @@ -22,36 +23,36 @@ is enabled by default, and can be toggled in the `config.toml` or via In this mode, the Tendermint daemon will sync hundreds of times faster than if it used the real-time consensus process. Once caught up, the -daemon will switch out of fast sync and into the normal consensus mode. +daemon will switch out of Block Sync and into the normal consensus mode. After running for some time, the node is considered `caught up` if it has at least one peer and its height is at least as high as the max reported peer height. See [the IsCaughtUp method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128). -Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta. +Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta. If you would like to use a different version you can do so by changing the version in the `config.toml`: ```toml ####################################################### -### Fast Sync Configuration Connections ### +### Block Sync Configuration Connections ### ####################################################### [fastsync] -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation +# Block Sync version to use: +# 1) "v0" (default) - the legacy Block Sync implementation # 2) "v2" - complete redesign of v0, optimized for testability & readability version = "v0" ``` -If we're lagging sufficiently, we should go back to fast syncing, but +If we're lagging sufficiently, we should go back to block syncing, but this is an [open issue](https://github.com/tendermint/tendermint/issues/129). -## The Fast Sync event -When the tendermint blockchain core launches, it might switch to the `fast-sync` +## The Block Sync event +When the tendermint blockchain core launches, it might switch to the `block-sync` mode to catch up the states to the current network best height. The core will emit a fast-sync event to expose the current status and the sync height. Once it has caught up to the network's best height, it will switch to the state sync mechanism and then emit another event exposing the fast-sync `complete` status and the state `height`. -The user can query the events by subscribing `EventQueryFastSyncStatus` +The user can query the events by subscribing to `EventQueryBlockSyncStatus` (see the sketch below). Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. \ No newline at end of file diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md index 623de4953..52286e6c7 100644 --- a/docs/tendermint-core/state-sync.md +++ b/docs/tendermint-core/state-sync.md @@ -4,7 +4,7 @@ order: 11 # State Sync -With fast sync a node is downloading all of the data of an application from genesis and verifying it. +With block sync a node is downloading all of the data of an application from genesis and verifying it. With state sync your node will download data related to the head or near the head of the chain and verify the data. This leads to drastically shorter times for joining a network.
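As a small illustration of the event subscription mentioned in the block sync document above, the sketch below watches `EventQueryBlockSyncStatus` through any client that implements the RPC events interface. Client construction is omitted because it varies between releases; treat the function and subscriber names as placeholders:

```go
package example

import (
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// watchBlockSync prints block-sync status events until the subscription
// channel is closed or the context is canceled.
func watchBlockSync(ctx context.Context, c rpcclient.EventsClient) error {
	events, err := c.Subscribe(ctx, "blocksync-watcher", types.EventQueryBlockSyncStatus.String())
	if err != nil {
		return err
	}

	for ev := range events {
		fmt.Printf("block sync status: %+v\n", ev.Data)
	}
	return nil
}
```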
diff --git a/go.mod b/go.mod index a9a9a5370..a23bc5167 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/tendermint/tendermint go 1.16 require ( - github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml v0.4.1 github.com/Masterminds/squirrel v1.5.0 github.com/Workiva/go-datastructures v1.0.53 github.com/adlio/schema v1.1.13 @@ -34,8 +34,10 @@ require ( github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 + github.com/vektra/mockery/v2 v2.9.0 golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 - google.golang.org/grpc v1.39.0 + google.golang.org/grpc v1.39.1 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 29f64f4d3..3e20b60ab 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,9 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EU dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -408,6 +409,7 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -762,6 +764,7 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= github.com/rs/zerolog v1.23.0 h1:UskrK+saS9P9Y789yNNulYKdARjPZuS35B8gJF2x60g= github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -815,6 +818,7 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= @@ -826,6 +830,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= @@ -873,6 +878,7 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= @@ -888,6 +894,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektra/mockery/v2 v2.9.0 h1:+3FhCL3EviR779mTzXwUuhPNnqFUA7sDnt9OFkXaFd4= +github.com/vektra/mockery/v2 v2.9.0/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= @@ -902,6 +910,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1002,6 +1011,7 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1135,6 +1145,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1171,6 +1182,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1200,6 +1212,7 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200323144430-8dcfad9e016e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -1333,6 +1346,7 @@ google.golang.org/genproto 
v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1354,8 +1368,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1424,6 +1438,8 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphD mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6tvqeIdtsVSsxmNPqlFU= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/blockchain/doc.go b/internal/blocksync/doc.go similarity index 73% rename from internal/blockchain/doc.go rename to internal/blocksync/doc.go index f1db243c6..3111130e4 100644 --- a/internal/blockchain/doc.go +++ b/internal/blocksync/doc.go @@ -1,19 +1,19 @@ /* -Package blockchain implements two versions of a reactor Service that are -responsible for block propagation and gossip between peers. This mechanism is -more formally known as fast-sync. +Package blocksync implements two versions of a reactor Service that are +responsible for block propagation and gossip between peers. This mechanism was +formerly known as fast-sync. In order for a full node to successfully participate in consensus, it must have -the latest view of state. The fast-sync protocol is a mechanism in which peers +the latest view of state. The blocksync protocol is a mechanism in which peers may exchange and gossip entire blocks with one another, in a request/response type model, until they've successfully synced to the latest head block. 
Once successfully synced, the full node can switch to an active role in consensus and -will no longer fast-sync and thus no longer run the fast-sync process. +will no longer blocksync and thus no longer run the blocksync process. -Note, the blockchain reactor Service gossips entire block and relevant data such -that each receiving peer may construct the entire view of the blockchain state. +Note, the blocksync reactor Service gossips entire blocks and relevant data such +that each receiving peer may construct the entire view of the blockchain state. -There are two versions of the blockchain reactor Service, i.e. fast-sync: +There are currently two versions of the blocksync reactor Service: - v0: The initial implementation that is battle-tested, but whose test coverage is lacking and is not formally verifiable. @@ -22,7 +22,7 @@ There are two versions of the blockchain reactor Service, i.e. fast-sync: is known to have various bugs that could make it unreliable in production environments. -The v0 blockchain reactor Service has one p2p channel, BlockchainChannel. This +The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This channel is responsible for handling messages that both request blocks and respond to block requests from peers. For every block request from a peer, the reactor will execute respondToPeer which will fetch the block from the node's state store @@ -33,4 +33,4 @@ Internally, v0 runs a poolRoutine that constantly checks for what blocks it need and requests them. The poolRoutine is also responsible for taking blocks from the pool, saving and executing each block. */ -package blockchain +package blocksync diff --git a/internal/blockchain/msgs.go b/internal/blocksync/msgs.go similarity index 90% rename from internal/blockchain/msgs.go rename to internal/blocksync/msgs.go index e901ae1e3..caad44b7b 100644 --- a/internal/blockchain/msgs.go +++ b/internal/blocksync/msgs.go @@ -1,7 +1,7 @@ -package blockchain +package blocksync import ( - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blockchain/v0/pool.go b/internal/blocksync/v0/pool.go similarity index 99% rename from internal/blockchain/v0/pool.go rename to internal/blocksync/v0/pool.go index c9c4030a8..b3704f333 100644 --- a/internal/blockchain/v0/pool.go +++ b/internal/blocksync/v0/pool.go @@ -65,7 +65,7 @@ type BlockRequest struct { PeerID types.NodeID } -// BlockPool keeps track of the fast sync peers, block requests and block responses. +// BlockPool keeps track of the block sync peers, block requests and block responses.
type BlockPool struct { service.BaseService lastAdvance time.Time diff --git a/internal/blockchain/v0/pool_test.go b/internal/blocksync/v0/pool_test.go similarity index 100% rename from internal/blockchain/v0/pool_test.go rename to internal/blocksync/v0/pool_test.go diff --git a/internal/blockchain/v0/reactor.go b/internal/blocksync/v0/reactor.go similarity index 96% rename from internal/blockchain/v0/reactor.go rename to internal/blocksync/v0/reactor.go index 52a17b693..c43959808 100644 --- a/internal/blockchain/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -6,13 +6,13 @@ import ( "sync" "time" - bc "github.com/tendermint/tendermint/internal/blockchain" + bc "github.com/tendermint/tendermint/internal/blocksync" cons "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmSync "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -60,7 +60,7 @@ const ( ) type consensusReactor interface { - // For when we switch from blockchain reactor and fast sync to the consensus + // For when we switch from blockchain reactor and block sync to the consensus // machine. SwitchToConsensus(state sm.State, skipWAL bool) } @@ -85,7 +85,7 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - fastSync *tmSync.AtomicBool + blockSync *tmSync.AtomicBool blockchainCh *p2p.Channel // blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope @@ -121,7 +121,7 @@ func NewReactor( consReactor consensusReactor, blockchainCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, - fastSync bool, + blockSync bool, metrics *cons.Metrics, ) (*Reactor, error) { if state.LastBlockHeight != store.Height() { @@ -142,7 +142,7 @@ func NewReactor( store: store, pool: NewBlockPool(startHeight, requestsCh, errorsCh), consReactor: consReactor, - fastSync: tmSync.NewBool(fastSync), + blockSync: tmSync.NewBool(blockSync), requestsCh: requestsCh, errorsCh: errorsCh, blockchainCh: blockchainCh, @@ -162,10 +162,10 @@ func NewReactor( // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. // -// If fastSync is enabled, we also start the pool and the pool processing +// If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. func (r *Reactor) OnStart() error { - if r.fastSync.IsSet() { + if r.blockSync.IsSet() { if err := r.pool.Start(); err != nil { return err } @@ -183,7 +183,7 @@ func (r *Reactor) OnStart() error { // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. func (r *Reactor) OnStop() { - if r.fastSync.IsSet() { + if r.blockSync.IsSet() { if err := r.pool.Stop(); err != nil { r.Logger.Error("failed to stop pool", "err", err) } @@ -371,10 +371,10 @@ func (r *Reactor) processPeerUpdates() { } } -// SwitchToFastSync is called by the state sync reactor when switching to fast +// SwitchToBlockSync is called by the state sync reactor when switching to fast // sync. 
-func (r *Reactor) SwitchToFastSync(state sm.State) error { - r.fastSync.Set() +func (r *Reactor) SwitchToBlockSync(state sm.State) error { + r.blockSync.Set() r.initialState = state r.pool.height = state.LastBlockHeight + 1 @@ -496,7 +496,7 @@ FOR_LOOP: r.Logger.Error("failed to stop pool", "err", err) } - r.fastSync.UnSet() + r.blockSync.UnSet() if r.consReactor != nil { r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) @@ -591,7 +591,7 @@ FOR_LOOP: if blocksSynced%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) r.Logger.Info( - "fast sync rate", + "block sync rate", "height", r.pool.height, "max_peer_height", r.pool.MaxPeerHeight(), "blocks/s", lastRate, @@ -614,14 +614,14 @@ func (r *Reactor) GetMaxPeerBlockHeight() int64 { } func (r *Reactor) GetTotalSyncedTime() time.Duration { - if !r.fastSync.IsSet() || r.syncStartTime.IsZero() { + if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { return time.Duration(0) } return time.Since(r.syncStartTime) } func (r *Reactor) GetRemainingSyncTime() time.Duration { - if !r.fastSync.IsSet() { + if !r.blockSync.IsSet() { return time.Duration(0) } diff --git a/internal/blockchain/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go similarity index 99% rename from internal/blockchain/v0/reactor_test.go rename to internal/blocksync/v0/reactor_test.go index 1c9dc60c4..e038b57af 100644 --- a/internal/blockchain/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" @@ -36,7 +36,7 @@ type reactorTestSuite struct { peerChans map[types.NodeID]chan p2p.PeerUpdate peerUpdates map[types.NodeID]*p2p.PeerUpdates - fastSync bool + blockSync bool } func setup( @@ -61,7 +61,7 @@ func setup( blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - fastSync: true, + blockSync: true, } chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)} @@ -163,7 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, nil, rts.blockchainChannels[nodeID], rts.peerUpdates[nodeID], - rts.fastSync, + rts.blockSync, cons.NopMetrics()) require.NoError(t, err) diff --git a/internal/blockchain/v2/internal/behavior/doc.go b/internal/blocksync/v2/internal/behavior/doc.go similarity index 100% rename from internal/blockchain/v2/internal/behavior/doc.go rename to internal/blocksync/v2/internal/behavior/doc.go diff --git a/internal/blockchain/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go similarity index 100% rename from internal/blockchain/v2/internal/behavior/peer_behaviour.go rename to internal/blocksync/v2/internal/behavior/peer_behaviour.go diff --git a/internal/blockchain/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go similarity index 100% rename from internal/blockchain/v2/internal/behavior/reporter.go rename to internal/blocksync/v2/internal/behavior/reporter.go diff --git 
a/internal/blockchain/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go similarity index 98% rename from internal/blockchain/v2/internal/behavior/reporter_test.go rename to internal/blocksync/v2/internal/behavior/reporter_test.go index a045f19aa..861a63df0 100644 --- a/internal/blockchain/v2/internal/behavior/reporter_test.go +++ b/internal/blocksync/v2/internal/behavior/reporter_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior" + bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blockchain/v2/io.go b/internal/blocksync/v2/io.go similarity index 98% rename from internal/blockchain/v2/io.go rename to internal/blocksync/v2/io.go index 8d02c408e..743428516 100644 --- a/internal/blockchain/v2/io.go +++ b/internal/blocksync/v2/io.go @@ -5,7 +5,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/internal/p2p" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -42,7 +42,7 @@ const ( ) type consensusReactor interface { - // for when we switch from blockchain reactor and fast sync to + // for when we switch from blockchain reactor and block sync to // the consensus machine SwitchToConsensus(state state.State, skipWAL bool) } diff --git a/internal/blockchain/v2/metrics.go b/internal/blocksync/v2/metrics.go similarity index 100% rename from internal/blockchain/v2/metrics.go rename to internal/blocksync/v2/metrics.go diff --git a/internal/blockchain/v2/processor.go b/internal/blocksync/v2/processor.go similarity index 100% rename from internal/blockchain/v2/processor.go rename to internal/blocksync/v2/processor.go diff --git a/internal/blockchain/v2/processor_context.go b/internal/blocksync/v2/processor_context.go similarity index 100% rename from internal/blockchain/v2/processor_context.go rename to internal/blocksync/v2/processor_context.go diff --git a/internal/blockchain/v2/processor_test.go b/internal/blocksync/v2/processor_test.go similarity index 100% rename from internal/blockchain/v2/processor_test.go rename to internal/blocksync/v2/processor_test.go diff --git a/internal/blockchain/v2/reactor.go b/internal/blocksync/v2/reactor.go similarity index 92% rename from internal/blockchain/v2/reactor.go rename to internal/blocksync/v2/reactor.go index af18f3c0a..caa5d73f0 100644 --- a/internal/blockchain/v2/reactor.go +++ b/internal/blocksync/v2/reactor.go @@ -7,14 +7,14 @@ import ( proto "github.com/gogo/protobuf/proto" - bc "github.com/tendermint/tendermint/internal/blockchain" - "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior" + bc "github.com/tendermint/tendermint/internal/blocksync" + "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" cons "github.com/tendermint/tendermint/internal/consensus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -31,12 
+31,12 @@ type blockStore interface { Height() int64 } -// BlockchainReactor handles fast sync protocol. +// BlockchainReactor handles block sync protocol. type BlockchainReactor struct { p2p.BaseReactor - fastSync *sync.AtomicBool // enable fast sync on start when it's been Set - stateSynced bool // set to true when SwitchToFastSync is called by state sync + blockSync *sync.AtomicBool // enable block sync on start when it's been Set + stateSynced bool // set to true when SwitchToBlockSync is called by state sync scheduler *Routine processor *Routine logger log.Logger @@ -44,7 +44,7 @@ type BlockchainReactor struct { mtx tmsync.RWMutex maxPeerHeight int64 syncHeight int64 - events chan Event // non-nil during a fast sync + events chan Event // non-nil during a block sync reporter behavior.Reporter io iIO @@ -61,7 +61,7 @@ type blockApplier interface { // XXX: unify naming in this package around tmState func newReactor(state state.State, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor { + blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor { initHeight := state.LastBlockHeight + 1 if initHeight == 1 { initHeight = state.InitialHeight @@ -78,7 +78,7 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter, store: store, reporter: reporter, logger: log.NewNopLogger(), - fastSync: sync.NewBool(fastSync), + blockSync: sync.NewBool(blockSync), syncStartHeight: initHeight, syncStartTime: time.Time{}, lastSyncRate: 0, @@ -90,10 +90,10 @@ func NewBlockchainReactor( state state.State, blockApplier blockApplier, store blockStore, - fastSync bool, + blockSync bool, metrics *cons.Metrics) *BlockchainReactor { reporter := behavior.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, fastSync, metrics) + return newReactor(state, store, reporter, blockApplier, blockSync, metrics) } // SetSwitch implements Reactor interface. @@ -137,22 +137,22 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) { // Start implements cmn.Service interface func (r *BlockchainReactor) Start() error { r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch) - if r.fastSync.IsSet() { + if r.blockSync.IsSet() { err := r.startSync(nil) if err != nil { - return fmt.Errorf("failed to start fast sync: %w", err) + return fmt.Errorf("failed to start block sync: %w", err) } } return nil } -// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil, +// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil, // the scheduler and processor is updated with this state on startup. func (r *BlockchainReactor) startSync(state *state.State) error { r.mtx.Lock() defer r.mtx.Unlock() if r.events != nil { - return errors.New("fast sync already in progress") + return errors.New("block sync already in progress") } r.events = make(chan Event, chBufferSize) go r.scheduler.start() @@ -167,7 +167,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error { return nil } -// endSync ends a fast sync +// endSync ends a block sync func (r *BlockchainReactor) endSync() { r.mtx.Lock() defer r.mtx.Unlock() @@ -179,8 +179,8 @@ func (r *BlockchainReactor) endSync() { r.processor.stop() } -// SwitchToFastSync is called by the state sync reactor when switching to fast sync. 
-func (r *BlockchainReactor) SwitchToFastSync(state state.State) error { +// SwitchToBlockSync is called by the state sync reactor when switching to block sync. +func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error { r.stateSynced = true state = state.Copy() @@ -434,7 +434,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) { } else { r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate } - r.logger.Info("Fast Sync Rate", "height", r.syncHeight, + r.logger.Info("block sync Rate", "height", r.syncHeight, "max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate) lastHundred = time.Now() } @@ -442,12 +442,12 @@ func (r *BlockchainReactor) demux(events <-chan Event) { case pcBlockVerificationFailure: r.scheduler.send(event) case pcFinished: - r.logger.Info("Fast sync complete, switching to consensus") + r.logger.Info("block sync complete, switching to consensus") if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { r.logger.Error("Failed to switch to consensus reactor") } r.endSync() - r.fastSync.UnSet() + r.blockSync.UnSet() return case noOpEvent: default: @@ -617,14 +617,14 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 { } func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration { - if !r.fastSync.IsSet() || r.syncStartTime.IsZero() { + if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { return time.Duration(0) } return time.Since(r.syncStartTime) } func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration { - if !r.fastSync.IsSet() { + if !r.blockSync.IsSet() { return time.Duration(0) } diff --git a/internal/blockchain/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go similarity index 99% rename from internal/blockchain/v2/reactor_test.go rename to internal/blocksync/v2/reactor_test.go index f4ded22da..4120b3942 100644 --- a/internal/blockchain/v2/reactor_test.go +++ b/internal/blocksync/v2/reactor_test.go @@ -15,7 +15,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior" + "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" cons "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/p2p" @@ -23,7 +23,7 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" diff --git a/internal/blockchain/v2/routine.go b/internal/blocksync/v2/routine.go similarity index 100% rename from internal/blockchain/v2/routine.go rename to internal/blocksync/v2/routine.go diff --git a/internal/blockchain/v2/routine_test.go b/internal/blocksync/v2/routine_test.go similarity index 100% rename from internal/blockchain/v2/routine_test.go rename to internal/blocksync/v2/routine_test.go diff --git a/internal/blockchain/v2/scheduler.go b/internal/blocksync/v2/scheduler.go similarity index 99% rename from internal/blockchain/v2/scheduler.go rename to internal/blocksync/v2/scheduler.go index f884a66ed..b731d96a4 100644 --- a/internal/blockchain/v2/scheduler.go +++ 
b/internal/blocksync/v2/scheduler.go @@ -163,7 +163,7 @@ type scheduler struct { height int64 // lastAdvance tracks the last time a block execution happened. - // syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing. + // syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing. // This covers the cases where there are no peers or all peers have a lower height. lastAdvance time.Time syncTimeout time.Duration diff --git a/internal/blockchain/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go similarity index 100% rename from internal/blockchain/v2/scheduler_test.go rename to internal/blocksync/v2/scheduler_test.go diff --git a/internal/blockchain/v2/types.go b/internal/blocksync/v2/types.go similarity index 100% rename from internal/blockchain/v2/types.go rename to internal/blocksync/v2/types.go diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 26170f3bc..17ba1ce2e 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -88,6 +88,7 @@ type validatorStub struct { Round int32 types.PrivValidator VotingPower int64 + lastVote *types.Vote } const testMinPower int64 = 10 @@ -121,8 +122,18 @@ func (vs *validatorStub) signVote( BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() - err = vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v) + if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil { + return nil, fmt.Errorf("sign vote failed: %w", err) + } + + // ref: signVote in FilePV, the vote should use the previous vote info when the sign data is the same. + if signDataIsEqual(vs.lastVote, v) { + v.Signature = vs.lastVote.Signature + v.Timestamp = vs.lastVote.Timestamp + } + vote.Signature = v.Signature + vote.Timestamp = v.Timestamp return vote, err } @@ -139,6 +150,9 @@ func signVote( if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } + + vs.lastVote = v + return v } @@ -876,3 +890,16 @@ func newKVStore() abci.Application { func newPersistentKVStoreWithPath(dbDir string) abci.Application { return kvstore.NewPersistentKVStoreApplication(dbDir) } + +func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool { + if v1 == nil || v2 == nil { + return false + } + + return v1.Type == v2.Type && + bytes.Equal(v1.BlockID.Hash, v2.BlockID.GetHash()) && + v1.Height == v2.GetHeight() && + v1.Round == v2.Round && + bytes.Equal(v1.ValidatorAddress.Bytes(), v2.GetValidatorAddress()) && + v1.ValidatorIndex == v2.GetValidatorIndex() +} diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index 5b4c47502..bceac4942 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -54,8 +54,8 @@ type Metrics struct { TotalTxs metrics.Gauge // The latest block height. CommittedHeight metrics.Gauge - // Whether or not a node is fast syncing. 1 if yes, 0 if no. - FastSyncing metrics.Gauge + // Whether or not a node is block syncing. 1 if yes, 0 if no. + BlockSyncing metrics.Gauge // Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge @@ -169,11 +169,11 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "latest_block_height", Help: "The latest block height.", }, labels).With(labelsAndValues...), - FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "fast_syncing", - Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", + Name: "block_syncing", + Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.", }, labels).With(labelsAndValues...), StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -214,7 +214,7 @@ func NopMetrics() *Metrics { BlockSizeBytes: discard.NewHistogram(), TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), - FastSyncing: discard.NewGauge(), + BlockSyncing: discard.NewGauge(), StateSyncing: discard.NewGauge(), BlockParts: discard.NewCounter(), } diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index fcaa5696d..263969798 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -12,8 +12,8 @@ type ConsSyncReactor struct { mock.Mock } -// SetFastSyncingMetrics provides a mock function with given fields: _a0 -func (_m *ConsSyncReactor) SetFastSyncingMetrics(_a0 float64) { +// SetBlockSyncingMetrics provides a mock function with given fields: _a0 +func (_m *ConsSyncReactor) SetBlockSyncingMetrics(_a0 float64) { _m.Called(_a0) } diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go index f6a7fabbc..b7f521ff2 100644 --- a/internal/consensus/mocks/fast_sync_reactor.go +++ b/internal/consensus/mocks/fast_sync_reactor.go @@ -9,13 +9,13 @@ import ( time "time" ) -// FastSyncReactor is an autogenerated mock type for the FastSyncReactor type -type FastSyncReactor struct { +// BlockSyncReactor is an autogenerated mock type for the BlockSyncReactor type +type BlockSyncReactor struct { mock.Mock } // GetMaxPeerBlockHeight provides a mock function with given fields: -func (_m *FastSyncReactor) GetMaxPeerBlockHeight() int64 { +func (_m *BlockSyncReactor) GetMaxPeerBlockHeight() int64 { ret := _m.Called() var r0 int64 @@ -29,7 +29,7 @@ func (_m *FastSyncReactor) GetMaxPeerBlockHeight() int64 { } // GetRemainingSyncTime provides a mock function with given fields: -func (_m *FastSyncReactor) GetRemainingSyncTime() time.Duration { +func (_m *BlockSyncReactor) GetRemainingSyncTime() time.Duration { ret := _m.Called() var r0 time.Duration @@ -43,7 +43,7 @@ func (_m *FastSyncReactor) GetRemainingSyncTime() time.Duration { } // GetTotalSyncedTime provides a mock function with given fields: -func (_m *FastSyncReactor) GetTotalSyncedTime() time.Duration { +func (_m *BlockSyncReactor) GetTotalSyncedTime() time.Duration { ret := _m.Called() var r0 time.Duration @@ -56,8 +56,8 @@ func (_m *FastSyncReactor) GetTotalSyncedTime() time.Duration { return r0 } -// SwitchToFastSync provides a mock function with given fields: _a0 -func (_m *FastSyncReactor) SwitchToFastSync(_a0 state.State) error { +// SwitchToBlockSync provides a mock function with given fields: _a0 +func (_m *BlockSyncReactor) SwitchToBlockSync(_a0 state.State) error { ret := _m.Called(_a0) var r0 error diff --git 
a/internal/consensus/reactor.go b/internal/consensus/reactor.go index e01a6f329..2b9fa7358 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -96,28 +96,28 @@ const ( type ReactorOption func(*Reactor) -// Temporary interface for switching to fast sync, we should get rid of v0. +// NOTE: Temporary interface for switching to block sync, we should get rid of v0. // See: https://github.com/tendermint/tendermint/issues/4595 -type FastSyncReactor interface { - SwitchToFastSync(sm.State) error +type BlockSyncReactor interface { + SwitchToBlockSync(sm.State) error GetMaxPeerBlockHeight() int64 - // GetTotalSyncedTime returns the time duration since the fastsync starting. + // GetTotalSyncedTime returns the time duration since the blocksync starting. GetTotalSyncedTime() time.Duration // GetRemainingSyncTime returns the estimating time the node will be fully synced, - // if will return 0 if the fastsync does not perform or the number of block synced is + // if will return 0 if the blocksync does not perform or the number of block synced is // too small (less than 100). GetRemainingSyncTime() time.Duration } -//go:generate mockery --case underscore --name ConsSyncReactor +//go:generate ../../scripts/mockery_generate.sh ConsSyncReactor // ConsSyncReactor defines an interface used for testing abilities of node.startStateSync. type ConsSyncReactor interface { SwitchToConsensus(sm.State, bool) SetStateSyncingMetrics(float64) - SetFastSyncingMetrics(float64) + SetBlockSyncingMetrics(float64) } // Reactor defines a reactor for the consensus service. @@ -265,7 +265,7 @@ func (r *Reactor) SetEventBus(b *types.EventBus) { r.state.SetEventBus(b) } -// WaitSync returns whether the consensus reactor is waiting for state/fast sync. +// WaitSync returns whether the consensus reactor is waiting for state/block sync. func (r *Reactor) WaitSync() bool { r.mtx.RLock() defer r.mtx.RUnlock() @@ -278,8 +278,8 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { return func(r *Reactor) { r.Metrics = metrics } } -// SwitchToConsensus switches from fast-sync mode to consensus mode. It resets -// the state, turns off fast-sync, and starts the consensus state-machine. +// SwitchToConsensus switches from block-sync mode to consensus mode. It resets +// the state, turns off block-sync, and starts the consensus state-machine. func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.Logger.Info("switching to consensus") @@ -296,7 +296,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.waitSync = false r.mtx.Unlock() - r.Metrics.FastSyncing.Set(0) + r.Metrics.BlockSyncing.Set(0) r.Metrics.StateSyncing.Set(0) if skipWAL { @@ -313,9 +313,9 @@ conR: %+v`, err, r.state, r)) } - d := types.EventDataFastSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := r.eventBus.PublishEventFastSyncStatus(d); err != nil { - r.Logger.Error("failed to emit the fastsync complete event", "err", err) + d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight} + if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil { + r.Logger.Error("failed to emit the blocksync complete event", "err", err) } } @@ -969,7 +969,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { go r.gossipVotesRoutine(ps) go r.queryMaj23Routine(ps) - // Send our state to the peer. If we're fast-syncing, broadcast a + // Send our state to the peer. If we're block-syncing, broadcast a // RoundStepMessage later upon SwitchToConsensus(). 
if !r.waitSync { go r.sendNewRoundStepMessage(ps.peerID) @@ -1219,7 +1219,7 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. // -// NOTE: We process these messages even when we're fast_syncing. Messages affect +// NOTE: We process these messages even when we're block syncing. Messages affect // either a peer state or the consensus state. Peer state updates can happen in // parallel, but processing of proposals, block parts, and votes are ordered by // the p2p channel. @@ -1442,6 +1442,6 @@ func (r *Reactor) SetStateSyncingMetrics(v float64) { r.Metrics.StateSyncing.Set(v) } -func (r *Reactor) SetFastSyncingMetrics(v float64) { - r.Metrics.FastSyncing.Set(v) +func (r *Reactor) SetBlockSyncingMetrics(v float64) { + r.Metrics.BlockSyncing.Set(v) } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 900abc0ff..8c70ca1d5 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -43,7 +43,7 @@ type reactorTestSuite struct { states map[types.NodeID]*State reactors map[types.NodeID]*Reactor subs map[types.NodeID]types.Subscription - fastsyncSubs map[types.NodeID]types.Subscription + blocksyncSubs map[types.NodeID]types.Subscription stateChannels map[types.NodeID]*p2p.Channel dataChannels map[types.NodeID]*p2p.Channel voteChannels map[types.NodeID]*p2p.Channel @@ -60,11 +60,11 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu t.Helper() rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - states: make(map[types.NodeID]*State), - reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - fastsyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + states: make(map[types.NodeID]*State), + reactors: make(map[types.NodeID]*Reactor, numNodes), + subs: make(map[types.NodeID]types.Subscription, numNodes), + blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), } rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size) @@ -94,13 +94,13 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryFastSyncStatus, size) + fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size) require.NoError(t, err) rts.states[nodeID] = state rts.subs[nodeID] = blocksSub rts.reactors[nodeID] = reactor - rts.fastsyncSubs[nodeID] = fsSub + rts.blocksyncSubs[nodeID] = fsSub // simulate handle initChain in handshake if state.state.LastBlockHeight == 0 { @@ -263,9 +263,9 @@ func waitForBlockWithUpdatedValsAndValidateIt( wg.Wait() } -func ensureFastSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) { +func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) { t.Helper() - status, ok := msg.Data().(types.EventDataFastSyncStatus) + status, ok := msg.Data().(types.EventDataBlockSyncStatus) require.True(t, ok) 
require.Equal(t, complete, status.Complete) @@ -301,14 +301,14 @@ func TestReactorBasic(t *testing.T) { wg.Wait() - for _, sub := range rts.fastsyncSubs { + for _, sub := range rts.blocksyncSubs { wg.Add(1) // wait till everyone makes the consensus switch go func(s types.Subscription) { defer wg.Done() msg := <-s.Out() - ensureFastSyncStatus(t, msg, true, 0) + ensureBlockSyncStatus(t, msg, true, 0) }(sub) } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index e7c480cea..4d1c9c6b2 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -1203,8 +1203,8 @@ func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSe func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return bs.commits[height-1] } -func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { - return bs.commits[height-1] +func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { + return bs.commits[len(bs.commits)-1] } func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index a20f488e4..4da989b40 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -314,7 +314,14 @@ func (cs *State) LoadCommit(height int64) *types.Commit { defer cs.mtx.RUnlock() if height == cs.blockStore.Height() { - return cs.blockStore.LoadSeenCommit(height) + commit := cs.blockStore.LoadSeenCommit() + // NOTE: Retrieving the height of the most recent block and retrieving + // the most recent commit does not currently occur as an atomic + // operation. We check the height and commit here in case a more recent + // commit has arrived since retrieving the latest height. + if commit != nil && commit.Height == height { + return commit + } } return cs.blockStore.LoadBlockCommit(height) @@ -594,15 +601,19 @@ func (cs *State) sendInternalMessage(mi msgInfo) { // Reconstruct LastCommit from SeenCommit, which we saved along with the block, // (which happens even before saving the state) func (cs *State) reconstructLastCommit(state sm.State) { - seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - if seenCommit == nil { + commit := cs.blockStore.LoadSeenCommit() + if commit == nil || commit.Height != state.LastBlockHeight { + commit = cs.blockStore.LoadBlockCommit(state.LastBlockHeight) + } + + if commit == nil { panic(fmt.Sprintf( - "failed to reconstruct last commit; seen commit for height %v not found", + "failed to reconstruct last commit; commit for height %v not found", state.LastBlockHeight, )) } - lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) + lastPrecommits := types.CommitToVoteSet(state.ChainID, commit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { panic("failed to reconstruct last commit; does not have +2/3 maj") } @@ -2218,6 +2229,7 @@ func (cs *State) signVote( err := cs.privValidator.SignVote(ctx, cs.state.ChainID, v) vote.Signature = v.Signature + vote.Timestamp = v.Timestamp return vote, err } @@ -2317,7 +2329,7 @@ func (cs *State) checkDoubleSigningRisk(height int64) error { } for i := int64(1); i < doubleSignCheckHeight; i++ { - lastCommit := cs.blockStore.LoadSeenCommit(height - i) + lastCommit := cs.LoadCommit(height - i) if lastCommit != nil { for sigIdx, s := range lastCommit.Signatures { if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { diff --git a/internal/consensus/state_test.go 
b/internal/consensus/state_test.go index b088e2ac7..b3b7c81a3 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -1939,6 +1939,30 @@ func TestStateOutputVoteStats(t *testing.T) { } +func TestSignSameVoteTwice(t *testing.T) { + config := configSetup(t) + + _, vss := randState(config, 2) + + randBytes := tmrand.Bytes(tmhash.Size) + + vote := signVote(vss[1], + config, + tmproto.PrecommitType, + randBytes, + types.PartSetHeader{Total: 10, Hash: randBytes}, + ) + + vote2 := signVote(vss[1], + config, + tmproto.PrecommitType, + randBytes, + types.PartSetHeader{Total: 10, Hash: randBytes}, + ) + + require.Equal(t, vote, vote2) +} + // subscribe subscribes test client to the given query and returns a channel with cap = 1. func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) diff --git a/internal/evidence/services.go b/internal/evidence/services.go index 274433cbe..473999b21 100644 --- a/internal/evidence/services.go +++ b/internal/evidence/services.go @@ -4,7 +4,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name BlockStore +//go:generate ../../scripts/mockery_generate.sh BlockStore type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta diff --git a/internal/libs/clist/clist_property_test.go b/internal/libs/clist/clist_property_test.go new file mode 100644 index 000000000..cdc173ee5 --- /dev/null +++ b/internal/libs/clist/clist_property_test.go @@ -0,0 +1,72 @@ +package clist_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/tendermint/tendermint/internal/libs/clist" +) + +func TestCListProperties(t *testing.T) { + rapid.Check(t, rapid.Run(&clistModel{})) +} + +// clistModel is used by the rapid state machine testing framework. +// clistModel contains both the clist that is being tested and a slice of *clist.CElements +// that will be used to model the expected clist behavior. +type clistModel struct { + clist *clist.CList + + model []*clist.CElement +} + +// Init is a method used by the rapid state machine testing library. +// Init is called when the test starts to initialize the data that will be used +// in the state machine test. +func (m *clistModel) Init(t *rapid.T) { + m.clist = clist.New() + m.model = []*clist.CElement{} +} + +// PushBack defines an action that will be randomly selected across by the rapid state +// machines testing library. Every call to PushBack calls PushBack on the clist and +// performs a similar action on the model data. +func (m *clistModel) PushBack(t *rapid.T) { + value := rapid.String().Draw(t, "value").(string) + el := m.clist.PushBack(value) + m.model = append(m.model, el) +} + +// Remove defines an action that will be randomly selected across by the rapid state +// machine testing library. Every call to Remove selects an element from the model +// and calls Remove on the CList with that element. The same element is removed from +// the model to keep the objects in sync. +func (m *clistModel) Remove(t *rapid.T) { + if len(m.model) == 0 { + return + } + ix := rapid.IntRange(0, len(m.model)-1).Draw(t, "index").(int) + value := m.model[ix] + m.model = append(m.model[:ix], m.model[ix+1:]...) + m.clist.Remove(value) +} + +// Check is a method required by the rapid state machine testing library. 
+// Check is run after each action and is used to verify that the state of the object, +// in this case a clist.CList, matches the state of the model. +func (m *clistModel) Check(t *rapid.T) { + require.Equal(t, len(m.model), m.clist.Len()) + if len(m.model) == 0 { + return + } + require.Equal(t, m.model[0], m.clist.Front()) + require.Equal(t, m.model[len(m.model)-1], m.clist.Back()) + + iter := m.clist.Front() + for _, val := range m.model { + require.Equal(t, val, iter) + iter = iter.Next() + } +} diff --git a/internal/libs/progressbar/progressbar.go b/internal/libs/progressbar/progressbar.go new file mode 100644 index 000000000..072804c76 --- /dev/null +++ b/internal/libs/progressbar/progressbar.go @@ -0,0 +1,41 @@ +package progressbar + +import "fmt" + +// Package progressbar implements a simple progress bar that indicates the current status and progress. +// ref: https://www.pixelstech.net/article/1596946473-A-simple-example-on-implementing-progress-bar-in-GoLang + +type Bar struct { + percent int64 // progress percentage + cur int64 // current progress + start int64 // the init starting value for progress + total int64 // total value for progress + rate string // the actual progress bar to be printed + graph string // the fill value for progress bar +} + +func (bar *Bar) NewOption(start, total int64) { + bar.cur = start + bar.start = start + bar.total = total + bar.graph = "█" + bar.percent = bar.getPercent() +} + +func (bar *Bar) getPercent() int64 { + return int64(float32(bar.cur-bar.start) / float32(bar.total-bar.start) * 100) +} + +func (bar *Bar) Play(cur int64) { + bar.cur = cur + last := bar.percent + bar.percent = bar.getPercent() + if bar.percent != last && bar.percent%2 == 0 { + bar.rate += bar.graph + } + fmt.Printf("\r[%-50s]%3d%% %8d/%d", bar.rate, bar.percent, bar.cur, bar.total) +} + +func (bar *Bar) Finish() { + fmt.Println() +} diff --git a/internal/libs/progressbar/progressbar_test.go b/internal/libs/progressbar/progressbar_test.go new file mode 100644 index 000000000..d135748f6 --- /dev/null +++ b/internal/libs/progressbar/progressbar_test.go @@ -0,0 +1,41 @@ +package progressbar + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestProgressBar(t *testing.T) { + zero := int64(0) + hundred := int64(100) + + var bar Bar + bar.NewOption(zero, hundred) + + require.Equal(t, zero, bar.start) + require.Equal(t, zero, bar.cur) + require.Equal(t, hundred, bar.total) + require.Equal(t, zero, bar.percent) + require.Equal(t, "█", bar.graph) + require.Equal(t, "", bar.rate) + + defer bar.Finish() + for i := zero; i <= hundred; i++ { + time.Sleep(1 * time.Millisecond) + bar.Play(i) + } + + require.Equal(t, zero, bar.start) + require.Equal(t, hundred, bar.cur) + require.Equal(t, hundred, bar.total) + require.Equal(t, hundred, bar.percent) + + var rate string + for i := zero; i < hundred/2; i++ { + rate += "█" + } + + require.Equal(t, rate, bar.rate) +} diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index aab3020ef..850600697 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -188,8 +188,8 @@ func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { // NextGossipTx returns the next valid transaction to gossip. A caller must wait // for WaitForNextTx to signal a transaction is available to gossip first. It is // thread-safe.
-func (txmp *TxMempool) NextGossipTx() *WrappedTx { - return txmp.gossipIndex.Front().Value.(*WrappedTx) +func (txmp *TxMempool) NextGossipTx() *clist.CElement { + return txmp.gossipIndex.Front() } // EnableTxsAvailable enables the mempool to trigger events when transactions diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 436dd9d27..3014e0519 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -9,6 +9,7 @@ import ( "time" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" @@ -53,6 +54,10 @@ type Reactor struct { // goroutines. peerWG sync.WaitGroup + // observePanic is a function for observing panics that were recovered in methods on + // Reactor. observePanic is called with the recovered value. + observePanic func(interface{}) + mtx tmsync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } @@ -76,12 +81,15 @@ func NewReactor( peerUpdates: peerUpdates, closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), + observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) return r } +func defaultObservePanic(r interface{}) {} + // GetChannelShims returns a map of ChannelDescriptorShim objects, where each // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding // p2p proto.Message the new p2p Channel is responsible for handling. @@ -187,6 +195,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { + r.observePanic(e) err = fmt.Errorf("panic in processing message: %v", e) r.Logger.Error( "recovering from processing message panic", @@ -306,7 +315,7 @@ func (r *Reactor) processPeerUpdates() { func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { peerMempoolID := r.ids.GetForPeer(peerID) - var memTx *WrappedTx + var nextGossipTx *clist.CElement // remove the peer ID from the map of routines and mark the waitgroup as done defer func() { @@ -317,6 +326,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) r.peerWG.Done() if e := recover(); e != nil { + r.observePanic(e) r.Logger.Error( "recovering from broadcasting mempool loop", "err", e, @@ -333,10 +343,10 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) // This happens because the CElement we were looking at got garbage // collected (removed). That is, .NextWait() returned nil. Go ahead and // start from the beginning. 
- if memTx == nil { + if nextGossipTx == nil { select { case <-r.mempool.WaitForNextTx(): // wait until a tx is available - if memTx = r.mempool.NextGossipTx(); memTx == nil { + if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { continue } @@ -352,6 +362,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } } + memTx := nextGossipTx.Value.(*WrappedTx) + if r.peerMgr != nil { height := r.peerMgr.GetHeight(peerID) if height > 0 && height < memTx.height-1 { @@ -380,16 +392,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } select { - case <-memTx.gossipEl.NextWaitChan(): - // If there is a next element in gossip index, we point memTx to that node's - // value, otherwise we reset memTx to nil which will be checked at the - // parent for loop. - next := memTx.gossipEl.Next() - if next != nil { - memTx = next.Value.(*WrappedTx) - } else { - memTx = nil - } + case <-nextGossipTx.NextWaitChan(): + nextGossipTx = nextGossipTx.Next() case <-closer.Done(): // The peer is marked for removal via a PeerUpdate as the doneCh was diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go new file mode 100644 index 000000000..5934d534c --- /dev/null +++ b/internal/mempool/v1/reactor_test.go @@ -0,0 +1,147 @@ +package v1 + +import ( + "os" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/libs/log" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +type reactorTestSuite struct { + network *p2ptest.Network + logger log.Logger + + reactors map[types.NodeID]*Reactor + mempoolChannels map[types.NodeID]*p2p.Channel + mempools map[types.NodeID]*TxMempool + kvstores map[types.NodeID]*kvstore.Application + + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates + + nodes []types.NodeID +} + +func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { + t.Helper() + + cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + t.Cleanup(func() { + os.RemoveAll(cfg.RootDir) + }) + + rts := &reactorTestSuite{ + logger: log.TestingLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + reactors: make(map[types.NodeID]*Reactor, numNodes), + mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempools: make(map[types.NodeID]*TxMempool, numNodes), + kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + } + + chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) + + for nodeID := range rts.network.Nodes { + rts.kvstores[nodeID] = kvstore.NewApplication() + + mempool := setup(t, 0) + rts.mempools[nodeID] = mempool + + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) + 
rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + + rts.reactors[nodeID] = NewReactor( + rts.logger.With("nodeID", nodeID), + cfg.Mempool, + rts.network.Nodes[nodeID].PeerManager, + mempool, + rts.mempoolChannels[nodeID], + rts.peerUpdates[nodeID], + ) + + rts.nodes = append(rts.nodes, nodeID) + + require.NoError(t, rts.reactors[nodeID].Start()) + require.True(t, rts.reactors[nodeID].IsRunning()) + } + + require.Len(t, rts.reactors, numNodes) + + t.Cleanup(func() { + for nodeID := range rts.reactors { + if rts.reactors[nodeID].IsRunning() { + require.NoError(t, rts.reactors[nodeID].Stop()) + require.False(t, rts.reactors[nodeID].IsRunning()) + } + } + }) + + return rts +} + +func (rts *reactorTestSuite) start(t *testing.T) { + t.Helper() + rts.network.Start(t) + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + len(rts.nodes)-1, + "network does not have expected number of nodes") +} + +func TestReactorBroadcastDoesNotPanic(t *testing.T) { + numNodes := 2 + rts := setupReactors(t, numNodes, 0) + + observePanic := func(r interface{}) { + t.Fatal("panic detected in reactor") + } + + primary := rts.nodes[0] + secondary := rts.nodes[1] + primaryReactor := rts.reactors[primary] + primaryMempool := primaryReactor.mempool + secondaryReactor := rts.reactors[secondary] + + primaryReactor.observePanic = observePanic + secondaryReactor.observePanic = observePanic + + firstTx := &WrappedTx{} + primaryMempool.insertTx(firstTx) + + // run the router + rts.start(t) + + closer := tmsync.NewCloser() + primaryReactor.peerWG.Add(1) + go primaryReactor.broadcastTxRoutine(secondary, closer) + + wg := &sync.WaitGroup{} + for i := 0; i < 50; i++ { + next := &WrappedTx{} + wg.Add(1) + go func() { + defer wg.Done() + primaryMempool.insertTx(next) + }() + } + + err := primaryReactor.Stop() + require.NoError(t, err) + primaryReactor.peerWG.Wait() + wg.Wait() +} diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index a0d75502b..6c6174117 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go index 436542915..b905c1156 100644 --- a/internal/p2p/mocks/peer.go +++ b/internal/p2p/mocks/peer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index d45cab016..82bd670cb 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go index 7042518eb..709a1294a 100644 --- a/internal/p2p/peer.go +++ b/internal/p2p/peer.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name Peer +//go:generate ../../scripts/mockery_generate.sh Peer const metricsTickerDuration = 10 * time.Second diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index fd6f96933..1e9afb38b 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -385,7 +385,7 @@ func (m *PeerManager) prunePeers() error { peerID := ranked[i].ID switch { case m.store.Size() <= int(m.options.MaxPeers): - break + return nil case m.dialing[peerID]: case m.connected[peerID]: default: diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 0e62c87d9..e4560c7bd 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -257,7 +257,11 @@ func (s *pqScheduler) process() { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, "peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) - s.dequeueCh <- pqEnv.envelope + select { + case s.dequeueCh <- pqEnv.envelope: + case <-s.closer.Done(): + return + } } case <-s.closer.Done(): diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go new file mode 100644 index 000000000..ddb7addbe --- /dev/null +++ b/internal/p2p/pqueue_test.go @@ -0,0 +1,39 @@ +package p2p + +import ( + "testing" + "time" + + "github.com/tendermint/tendermint/libs/log" +) + +func TestCloseWhileDequeueFull(t *testing.T) { + enqueueLength := 5 + chDescs := []ChannelDescriptor{ + {ID: 0x01, Priority: 1, MaxSendBytes: 4}, + } + pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) + + for i := 0; i < enqueueLength; i++ { + pqueue.enqueue() <- Envelope{ + channelID: 0x01, + Message: &testMessage{Value: "foo"}, // 5 bytes + } + } + + go pqueue.process() + + // sleep to allow context switch for process() to run + time.Sleep(10 * time.Millisecond) + doneCh := make(chan struct{}) + go func() { + pqueue.close() + close(doneCh) + }() + + select { + case <-doneCh: + case <-time.After(2 * time.Second): + t.Fatal("pqueue failed to close") + } +} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 320b30012..a3245dfc8 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/version" ) -//go:generate mockery --case underscore --name Transport|Connection +//go:generate ../../scripts/mockery_generate.sh Transport|Connection const ( // defaultProtocol is the default protocol used for NodeAddress when diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index af66fb24b..538c619fc 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index dca1432b1..fd889dc51 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -20,7 +20,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name StateProvider +//go:generate ../../scripts/mockery_generate.sh StateProvider // StateProvider is a provider of trusted state data for bootstrapping a node. 
This refers // to the state.State object, not the state machine. diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index cfb7a8db2..dd8e39737 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -27,15 +27,22 @@ func (bz *HexBytes) Unmarshal(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaler interface. The hex bytes is a -// quoted hexadecimal encoded string. +// MarshalJSON implements the json.Marshaler interface. The encoding is a JSON +// quoted string of hexadecimal digits. func (bz HexBytes) MarshalJSON() ([]byte, error) { - s := strings.ToUpper(hex.EncodeToString(bz)) - jbz := make([]byte, len(s)+2) - jbz[0] = '"' - copy(jbz[1:], s) - jbz[len(jbz)-1] = '"' - return jbz, nil + size := hex.EncodedLen(len(bz)) + 2 // +2 for quotation marks + buf := make([]byte, size) + hex.Encode(buf[1:], []byte(bz)) + buf[0] = '"' + buf[size-1] = '"' + + // Ensure letter digits are capitalized. + for i := 1; i < size-1; i++ { + if buf[i] >= 'a' && buf[i] <= 'f' { + buf[i] = 'A' + (buf[i] - 'a') + } + } + return buf, nil } // UnmarshalJSON implements the json.Umarshaler interface. diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index db882f1c1..6a9ca7c3d 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -37,6 +37,7 @@ func TestJSONMarshal(t *testing.T) { {[]byte(``), `{"B1":"","B2":""}`}, {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + {[]byte("\x1a\x2b\x3c"), `{"B1":"Gis8","B2":"1A2B3C"}`}, } for i, tc := range cases { diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 54a030fe8..7548470b5 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -231,34 +231,45 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { return err } var qs string + if args.Query != nil { qs = args.Query.String() } - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] + clientSubscriptions, err := func() (map[string]string, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return err + clientSubscriptions, ok := s.subscriptions[args.Subscriber] + if args.ID != "" { + qs, ok = clientSubscriptions[args.ID] + + if ok && args.Query == nil { + var err error + args.Query, err = query.New(qs) + if err != nil { + return nil, err + } } + } else if qs != "" { + args.ID, ok = clientSubscriptions[qs] } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound + if !ok { + return nil, ErrSubscriptionNotFound + } + + return clientSubscriptions, nil + }() + + if err != nil { + return err } select { case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: s.mtx.Lock() + defer s.mtx.Unlock() delete(clientSubscriptions, args.ID) delete(clientSubscriptions, qs) @@ -266,7 +277,6 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if len(clientSubscriptions) == 0 { delete(s.subscriptions, args.Subscriber) } - s.mtx.Unlock() return nil case <-ctx.Done(): return ctx.Err() @@ -288,8 +298,10 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID}: s.mtx.Lock() + defer s.mtx.Unlock() + delete(s.subscriptions, clientID) - s.mtx.Unlock() + return nil 
case <-ctx.Done(): return ctx.Err() diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index 72930928d..04ea6d1fc 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -10,8 +10,8 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" - mockp "github.com/tendermint/tendermint/light/provider/mock" dbs "github.com/tendermint/tendermint/light/store/db" + "github.com/tendermint/tendermint/types" ) // NOTE: block is produced every minute. Make sure the verification time @@ -21,12 +21,50 @@ import ( // or -benchtime 100x. // // Remember that none of these benchmarks account for network latency. -var ( - benchmarkFullNode = mockp.New(genMockNode(chainID, 1000, 100, 1, bTime)) - genesisBlock, _ = benchmarkFullNode.LightBlock(context.Background(), 1) -) +var () + +type providerBenchmarkImpl struct { + currentHeight int64 + blocks map[int64]*types.LightBlock +} + +func newProviderBenchmarkImpl(headers map[int64]*types.SignedHeader, + vals map[int64]*types.ValidatorSet) provider.Provider { + impl := providerBenchmarkImpl{ + blocks: make(map[int64]*types.LightBlock, len(headers)), + } + for height, header := range headers { + if height > impl.currentHeight { + impl.currentHeight = height + } + impl.blocks[height] = &types.LightBlock{ + SignedHeader: header, + ValidatorSet: vals[height], + } + } + return &impl +} + +func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { + if height == 0 { + return impl.blocks[impl.currentHeight], nil + } + lb, ok := impl.blocks[height] + if !ok { + return nil, provider.ErrLightBlockNotFound + } + return lb, nil +} + +func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error { + panic("not implemented") +} func BenchmarkSequence(b *testing.B) { + headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) + genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1) + c, err := light.NewClient( context.Background(), chainID, @@ -55,6 +93,10 @@ func BenchmarkSequence(b *testing.B) { } func BenchmarkBisection(b *testing.B) { + headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) + genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1) + c, err := light.NewClient( context.Background(), chainID, @@ -82,7 +124,10 @@ func BenchmarkBisection(b *testing.B) { } func BenchmarkBackwards(b *testing.B) { + headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) trustedBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 0) + c, err := light.NewClient( context.Background(), chainID, diff --git a/light/client_test.go b/light/client_test.go index 67e0525b8..e8a478a53 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -3,11 +3,13 @@ package light_test import ( "context" "errors" + "fmt" "sync" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -16,7 +18,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" - mockp 
"github.com/tendermint/tendermint/light/provider/mock" + provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" dbs "github.com/tendermint/tendermint/light/store/db" "github.com/tendermint/tendermint/types" ) @@ -57,14 +59,9 @@ var ( // last header (3/3 signed) 3: h3, } - l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} - fullNode = mockp.New( - chainID, - headerSet, - valSet, - ) - deadNode = mockp.NewDeadMock(chainID) - largeFullNode = mockp.New(genMockNode(chainID, 10, 3, 0, bTime)) + l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} + l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals} ) func TestValidateTrustOptions(t *testing.T) { @@ -113,11 +110,6 @@ func TestValidateTrustOptions(t *testing.T) { } -func TestMock(t *testing.T) { - l, _ := fullNode.LightBlock(ctx, 3) - assert.Equal(t, int64(3), l.Height) -} - func TestClient_SequentialVerification(t *testing.T) { newKeys := genPrivKeys(4) newVals := newKeys.ToValidators(10, 1) @@ -216,28 +208,22 @@ func TestClient_SequentialVerification(t *testing.T) { } for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { + testCase := tc + t.Run(testCase.name, func(t *testing.T) { + mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals) + mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) c, err := light.NewClient( ctx, chainID, trustOptions, - mockp.New( - chainID, - tc.otherHeaders, - tc.vals, - ), - []provider.Provider{mockp.New( - chainID, - tc.otherHeaders, - tc.vals, - )}, + mockNode, + []provider.Provider{mockNode}, dbs.New(dbm.NewMemDB()), light.SequentialVerification(), light.Logger(log.TestingLogger()), ) - if tc.initErr { + if testCase.initErr { require.Error(t, err) return } @@ -245,11 +231,12 @@ func TestClient_SequentialVerification(t *testing.T) { require.NoError(t, err) _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) - if tc.verifyErr { + if testCase.verifyErr { assert.Error(t, err) } else { assert.NoError(t, err) } + mockNode.AssertExpectations(t) }) } } @@ -343,20 +330,14 @@ func TestClient_SkippingVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { + mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals) + mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) c, err := light.NewClient( ctx, chainID, trustOptions, - mockp.New( - chainID, - tc.otherHeaders, - tc.vals, - ), - []provider.Provider{mockp.New( - chainID, - tc.otherHeaders, - tc.vals, - )}, + mockNode, + []provider.Provider{mockNode}, dbs.New(dbm.NewMemDB()), light.SkippingVerification(light.DefaultTrustLevel), light.Logger(log.TestingLogger()), @@ -382,8 +363,23 @@ func TestClient_SkippingVerification(t *testing.T) { // start from a large light block to make sure that the pivot height doesn't select a height outside // the appropriate range func TestClientLargeBisectionVerification(t *testing.T) { - veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime)) - trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5) + numBlocks := int64(300) + mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, numBlocks, 101, 2, bTime) + + lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]} + mockNode := &provider_mocks.Provider{} + mockNode.On("LightBlock", mock.Anything, 
numBlocks). + Return(lastBlock, nil) + + mockNode.On("LightBlock", mock.Anything, int64(200)). + Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil) + + mockNode.On("LightBlock", mock.Anything, int64(256)). + Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil) + + mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) + + trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200)) require.NoError(t, err) c, err := light.NewClient( ctx, @@ -393,20 +389,25 @@ func TestClientLargeBisectionVerification(t *testing.T) { Height: trustedLightBlock.Height, Hash: trustedLightBlock.Hash(), }, - veryLargeFullNode, - []provider.Provider{veryLargeFullNode}, + mockNode, + []provider.Provider{mockNode}, dbs.New(dbm.NewMemDB()), light.SkippingVerification(light.DefaultTrustLevel), ) require.NoError(t, err) - h, err := c.Update(ctx, bTime.Add(100*time.Minute)) + h, err := c.Update(ctx, bTime.Add(300*time.Minute)) assert.NoError(t, err) - h2, err := veryLargeFullNode.LightBlock(ctx, 100) + height, err := c.LastTrustedHeight() + require.NoError(t, err) + require.Equal(t, numBlocks, height) + h2, err := mockNode.LightBlock(ctx, numBlocks) require.NoError(t, err) assert.Equal(t, h, h2) + mockNode.AssertExpectations(t) } func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { + mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) c, err := light.NewClient( ctx, chainID, @@ -415,8 +416,8 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { Height: 1, Hash: h1.Hash(), }, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.SkippingVerification(light.DefaultTrustLevel), ) @@ -432,15 +433,18 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { // verify using bisection the light block between the two trusted light blocks _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) assert.NoError(t, err) + mockFullNode.AssertExpectations(t) } func TestClient_Cleanup(t *testing.T) { + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -455,12 +459,14 @@ func TestClient_Cleanup(t *testing.T) { l, err := c.TrustedLightBlock(1) assert.Error(t, err) assert.Nil(t, l) + mockFullNode.AssertExpectations(t) } // trustedHeader.Height == options.Height func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { // 1. options.Hash == trustedHeader.Hash - { + t.Run("hashes should match", func(t *testing.T) { + mockNode := &provider_mocks.Provider{} trustedStore := dbs.New(dbm.NewMemDB()) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -469,8 +475,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockNode, + []provider.Provider{mockNode}, trustedStore, light.Logger(log.TestingLogger()), ) @@ -481,10 +487,11 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { assert.NotNil(t, l) assert.Equal(t, l.Hash(), h1.Hash()) assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) - } + mockNode.AssertExpectations(t) + }) // 2. 
options.Hash != trustedHeader.Hash - { + t.Run("hashes should not match", func(t *testing.T) { trustedStore := dbs.New(dbm.NewMemDB()) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -492,15 +499,7 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { // header1 != h1 header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - - primary := mockp.New( - chainID, - map[int64]*types.SignedHeader{ - // trusted header - 1: header1, - }, - valSet, - ) + mockNode := &provider_mocks.Provider{} c, err := light.NewClient( ctx, @@ -510,8 +509,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { Height: 1, Hash: header1.Hash(), }, - primary, - []provider.Provider{primary}, + mockNode, + []provider.Provider{mockNode}, trustedStore, light.Logger(log.TestingLogger()), ) @@ -524,16 +523,21 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { assert.Equal(t, l.Hash(), l1.Hash()) assert.NoError(t, l.ValidateBasic(chainID)) } - } + mockNode.AssertExpectations(t) + }) } func TestClient_Update(t *testing.T) { + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -546,15 +550,19 @@ func TestClient_Update(t *testing.T) { assert.EqualValues(t, 3, l.Height) assert.NoError(t, l.ValidateBasic(chainID)) } + mockFullNode.AssertExpectations(t) } func TestClient_Concurrency(t *testing.T) { + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -587,15 +595,20 @@ func TestClient_Concurrency(t *testing.T) { } wg.Wait() + mockFullNode.AssertExpectations(t) } func TestClient_AddProviders(t *testing.T) { + mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, valSet) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -610,22 +623,28 @@ func TestClient_AddProviders(t *testing.T) { }() // NOTE: the light client doesn't check uniqueness of providers - c.AddProvider(fullNode) + c.AddProvider(mockFullNode) require.Len(t, c.Witnesses(), 2) select { case <-closeCh: case <-time.After(5 * time.Second): t.Fatal("concurent light block verification failed to finish in 5s") } + mockFullNode.AssertExpectations(t) } func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) + + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) c, err := light.NewClient( ctx, chainID, 
trustOptions, - deadNode, - []provider.Provider{fullNode, fullNode}, + mockDeadNode, + []provider.Provider{mockFullNode, mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -635,16 +654,25 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { require.NoError(t, err) // the primary should no longer be the deadNode - assert.NotEqual(t, c.Primary(), deadNode) + assert.NotEqual(t, c.Primary(), mockDeadNode) // we should still have the dead node as a witness because it // hasn't repeatedly been unresponsive yet assert.Equal(t, 2, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) } func TestClient_BackwardsVerification(t *testing.T) { { - trustHeader, _ := largeFullNode.LightBlock(ctx, 6) + headers, vals, _ := genLightBlocksWithKeys(chainID, 9, 3, 0, bTime) + delete(headers, 1) + delete(headers, 2) + delete(vals, 1) + delete(vals, 2) + mockLargeFullNode := mockNodeFromHeadersAndVals(headers, vals) + trustHeader, _ := mockLargeFullNode.LightBlock(ctx, 6) + c, err := light.NewClient( ctx, chainID, @@ -653,8 +681,8 @@ func TestClient_BackwardsVerification(t *testing.T) { Height: trustHeader.Height, Hash: trustHeader.Hash(), }, - largeFullNode, - []provider.Provider{largeFullNode}, + mockLargeFullNode, + []provider.Provider{mockLargeFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -692,41 +720,36 @@ func TestClient_BackwardsVerification(t *testing.T) { // so expect error _, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute)) assert.Error(t, err) + mockLargeFullNode.AssertExpectations(t) } { testCases := []struct { - provider provider.Provider + headers map[int64]*types.SignedHeader + vals map[int64]*types.ValidatorSet }{ { // 7) provides incorrect height - mockp.New( - chainID, - map[int64]*types.SignedHeader{ - 1: h1, - 2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), - 3: h3, - }, - valSet, - ), + headers: map[int64]*types.SignedHeader{ + 2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + 3: h3, + }, + vals: valSet, }, { // 8) provides incorrect hash - mockp.New( - chainID, - map[int64]*types.SignedHeader{ - 1: h1, - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), - 3: h3, - }, - valSet, - ), + headers: map[int64]*types.SignedHeader{ + 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), + 3: h3, + }, + vals: valSet, }, } for idx, tc := range testCases { + mockNode := mockNodeFromHeadersAndVals(tc.headers, tc.vals) c, err := light.NewClient( ctx, chainID, @@ -735,8 +758,8 @@ func TestClient_BackwardsVerification(t *testing.T) { Height: 3, Hash: h3.Hash(), }, - tc.provider, - []provider.Provider{tc.provider}, + mockNode, + []provider.Provider{mockNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -744,6 +767,7 @@ func TestClient_BackwardsVerification(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) assert.Error(t, err, idx) + mockNode.AssertExpectations(t) } } } @@ -753,60 +777,62 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { db := 
dbs.New(dbm.NewMemDB()) err := db.SaveLightBlock(l1) require.NoError(t, err) + mockNode := &provider_mocks.Provider{} c, err := light.NewClientFromTrustedStore( chainID, trustPeriod, - deadNode, - []provider.Provider{deadNode}, + mockNode, + []provider.Provider{mockNode}, db, ) require.NoError(t, err) - // 2) Check light block exists (deadNode is being used to ensure we're not getting - // it from primary) + // 2) Check light block exists h, err := c.TrustedLightBlock(1) assert.NoError(t, err) assert.EqualValues(t, l1.Height, h.Height) + mockNode.AssertExpectations(t) } func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { // different headers hash then primary plus less than 1/3 signed (no fork) - badProvider1 := mockp.New( - chainID, - map[int64]*types.SignedHeader{ - 1: h1, - 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash2"), hash("cons_hash"), hash("results_hash"), - len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), - }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - }, - ) - // header is empty - badProvider2 := mockp.New( - chainID, - map[int64]*types.SignedHeader{ - 1: h1, - 2: h2, - }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - }, - ) + headers1 := map[int64]*types.SignedHeader{ + 1: h1, + 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash"), hash("results_hash"), + len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), + } + vals1 := map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + } + mockBadNode1 := mockNodeFromHeadersAndVals(headers1, vals1) + mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - lb1, _ := badProvider1.LightBlock(ctx, 2) + // header is empty + headers2 := map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + } + vals2 := map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + } + mockBadNode2 := mockNodeFromHeadersAndVals(headers2, vals2) + mockBadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + + mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) + + lb1, _ := mockBadNode1.LightBlock(ctx, 2) require.NotEqual(t, lb1.Hash(), l1.Hash()) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{badProvider1, badProvider2}, + mockFullNode, + []provider.Provider{mockBadNode1, mockBadNode2}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -828,12 +854,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { } // witness does not have a light block -> left in the list assert.EqualValues(t, 1, len(c.Witnesses())) + mockBadNode1.AssertExpectations(t) + mockBadNode2.AssertExpectations(t) } func TestClient_TrustedValidatorSet(t *testing.T) { differentVals, _ := factory.RandValidatorSet(10, 100) - badValSetNode := mockp.New( - chainID, + mockBadValSetNode := mockNodeFromHeadersAndVals( map[int64]*types.SignedHeader{ 1: h1, // 3/3 signed, but validator set at height 2 below is invalid -> witness @@ -841,21 +868,27 @@ func TestClient_TrustedValidatorSet(t *testing.T) { 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}), - 3: h3, }, map[int64]*types.ValidatorSet{ 1: vals, 2: differentVals, - 3: differentVals, + }) + mockFullNode := mockNodeFromHeadersAndVals( + 
map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, }, - ) + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + }) c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{badValSetNode, fullNode}, + mockFullNode, + []provider.Provider{mockBadValSetNode, mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -865,15 +898,29 @@ func TestClient_TrustedValidatorSet(t *testing.T) { _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) assert.NoError(t, err) assert.Equal(t, 1, len(c.Witnesses())) + mockBadValSetNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) } func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { + mockFullNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + 0: h3, + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 3: vals, + 0: vals, + }) + c, err := light.NewClient( ctx, chainID, trustOptions, - fullNode, - []provider.Provider{fullNode}, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), light.PruningSize(1), @@ -888,6 +935,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { _, err = c.TrustedLightBlock(1) assert.Error(t, err) + mockFullNode.AssertExpectations(t) } func TestClientEnsureValidHeadersAndValSets(t *testing.T) { @@ -899,86 +947,108 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { testCases := []struct { headers map[int64]*types.SignedHeader vals map[int64]*types.ValidatorSet - err bool + + errorToThrow error + errorHeight int64 + + err bool }{ { - headerSet, - valSet, - false, - }, - { - headerSet, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: nil, - }, - true, - }, - { - map[int64]*types.SignedHeader{ + headers: map[int64]*types.SignedHeader{ 1: h1, - 2: h2, - 3: nil, + 3: h3, }, - valSet, - true, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + 3: vals, + }, + err: false, }, { - headerSet, - map[int64]*types.ValidatorSet{ + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, + errorHeight: 3, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, + errorHeight: 3, + vals: valSet, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + }, + vals: map[int64]*types.ValidatorSet{ 1: vals, - 2: vals, 3: emptyValSet, }, - true, + err: true, }, } - for _, tc := range testCases { - badNode := mockp.New( - chainID, - tc.headers, - tc.vals, - ) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - badNode, - []provider.Provider{badNode, badNode}, - dbs.New(dbm.NewMemDB()), - ) - require.NoError(t, err) + for i, tc := range testCases { + testCase := tc + t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) { + mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals) + if testCase.errorToThrow != nil { + mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow) + } - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - if tc.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockBadNode, + []provider.Provider{mockBadNode, mockBadNode}, + dbs.New(dbm.NewMemDB()), + ) + 
require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + if testCase.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + mockBadNode.AssertExpectations(t) + }) } } func TestClientHandlesContexts(t *testing.T) { - p := mockp.New(genMockNode(chainID, 100, 10, 1, bTime)) - genBlock, err := p.LightBlock(ctx, 1) - require.NoError(t, err) + mockNode := &provider_mocks.Provider{} + mockNode.On("LightBlock", + mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == nil }), + int64(1)).Return(l1, nil) + mockNode.On("LightBlock", + mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.DeadlineExceeded }), + mock.Anything).Return(nil, context.DeadlineExceeded) + + mockNode.On("LightBlock", + mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.Canceled }), + mock.Anything).Return(nil, context.Canceled) // instantiate the light client with a timeout - ctxTimeOut, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + ctxTimeOut, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) defer cancel() - _, err = light.NewClient( + _, err := light.NewClient( ctxTimeOut, chainID, - light.TrustOptions{ - Period: 24 * time.Hour, - Height: 1, - Hash: genBlock.Hash(), - }, - p, - []provider.Provider{p, p}, + trustOptions, + mockNode, + []provider.Provider{mockNode, mockNode}, dbs.New(dbm.NewMemDB()), ) require.Error(t, ctxTimeOut.Err()) @@ -989,19 +1059,15 @@ func TestClientHandlesContexts(t *testing.T) { c, err := light.NewClient( ctx, chainID, - light.TrustOptions{ - Period: 24 * time.Hour, - Height: 1, - Hash: genBlock.Hash(), - }, - p, - []provider.Provider{p, p}, + trustOptions, + mockNode, + []provider.Provider{mockNode, mockNode}, dbs.New(dbm.NewMemDB()), ) require.NoError(t, err) // verify a block with a timeout - ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) defer cancel() _, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute)) require.Error(t, ctxTimeOutBlock.Err()) @@ -1010,11 +1076,11 @@ func TestClientHandlesContexts(t *testing.T) { // verify a block with a cancel ctxCancel, cancel := context.WithCancel(ctx) - defer cancel() - time.AfterFunc(10*time.Millisecond, cancel) + cancel() _, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute)) require.Error(t, ctxCancel.Err()) require.Error(t, err) require.True(t, errors.Is(err, context.Canceled)) + mockNode.AssertExpectations(t) } diff --git a/light/detector_test.go b/light/detector_test.go index 48efd4130..0bf96ace6 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -1,10 +1,12 @@ package light_test import ( + "bytes" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -12,7 +14,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" - mockp "github.com/tendermint/tendermint/light/provider/mock" + provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" dbs "github.com/tendermint/tendermint/light/store/db" "github.com/tendermint/tendermint/types" ) @@ -20,15 +22,15 @@ import ( func TestLightClientAttackEvidence_Lunatic(t *testing.T) { // primary performs a lunatic attack var ( - latestHeight = int64(10) + latestHeight = int64(3) valSize = 5 - 
divergenceHeight = int64(6) + divergenceHeight = int64(2) primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) ) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime) - witness := mockp.New(chainID, witnessHeaders, witnessValidators) + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) + forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) forgedVals := forgedKeys.ToValidators(2, 0) @@ -42,7 +44,38 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { nil, forgedVals, forgedVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(forgedKeys)) primaryValidators[height] = forgedVals } - primary := mockp.New(chainID, primaryHeaders, primaryValidators) + + // never called, delete it to make mockery asserts pass + delete(witnessHeaders, 2) + delete(primaryHeaders, 2) + + mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators) + mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators) + + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + evAgainstPrimary := &types.LightClientAttackEvidence{ + // after the divergence height the valset doesn't change so we expect the evidence to be for the latest height + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[latestHeight], + ValidatorSet: primaryValidators[latestHeight], + }, + CommonHeight: 1, + } + return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + })).Return(nil) + + mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + evAgainstWitness := &types.LightClientAttackEvidence{ + // when forming evidence against witness we learn that the canonical chain continued to change validator sets + // hence the conflicting block is at 7 + ConflictingBlock: &types.LightBlock{ + SignedHeader: witnessHeaders[divergenceHeight+1], + ValidatorSet: witnessValidators[divergenceHeight+1], + }, + CommonHeight: divergenceHeight - 1, + } + return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash()) + })).Return(nil) c, err := light.NewClient( ctx, @@ -52,121 +85,134 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { Height: 1, Hash: primaryHeaders[1].Hash(), }, - primary, - []provider.Provider{witness}, + mockPrimary, + []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) require.NoError(t, err) // Check verification returns an error. - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, latestHeight, bTime.Add(1*time.Hour)) if assert.Error(t, err) { assert.Equal(t, light.ErrLightClientAttack, err) } - // Check evidence was sent to both full nodes. 
- evAgainstPrimary := &types.LightClientAttackEvidence{ - // after the divergence height the valset doesn't change so we expect the evidence to be for height 10 - ConflictingBlock: &types.LightBlock{ - SignedHeader: primaryHeaders[10], - ValidatorSet: primaryValidators[10], - }, - CommonHeight: 4, - } - assert.True(t, witness.HasEvidence(evAgainstPrimary)) - - evAgainstWitness := &types.LightClientAttackEvidence{ - // when forming evidence against witness we learn that the canonical chain continued to change validator sets - // hence the conflicting block is at 7 - ConflictingBlock: &types.LightBlock{ - SignedHeader: witnessHeaders[7], - ValidatorSet: witnessValidators[7], - }, - CommonHeight: 4, - } - assert.True(t, primary.HasEvidence(evAgainstWitness)) + mockWitness.AssertExpectations(t) + mockPrimary.AssertExpectations(t) } func TestLightClientAttackEvidence_Equivocation(t *testing.T) { - verificationOptions := map[string]light.Option{ - "sequential": light.SequentialVerification(), - "skipping": light.SkippingVerification(light.DefaultTrustLevel), + cases := []struct { + name string + lightOption light.Option + unusedWitnessBlockHeights []int64 + unusedPrimaryBlockHeights []int64 + latestHeight int64 + divergenceHeight int64 + }{ + { + name: "sequential", + lightOption: light.SequentialVerification(), + unusedWitnessBlockHeights: []int64{4, 6}, + latestHeight: int64(5), + divergenceHeight: int64(3), + }, + { + name: "skipping", + lightOption: light.SkippingVerification(light.DefaultTrustLevel), + unusedWitnessBlockHeights: []int64{2, 4, 6}, + unusedPrimaryBlockHeights: []int64{2, 4, 6}, + latestHeight: int64(5), + divergenceHeight: int64(3), + }, } - for s, verificationOption := range verificationOptions { - t.Log("==> verification", s) - - // primary performs an equivocation attack - var ( - latestHeight = int64(10) - valSize = 5 - divergenceHeight = int64(6) - primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) - primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) - ) - // validators don't change in this network (however we still use a map just for convenience) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime) - witness := mockp.New(chainID, witnessHeaders, witnessValidators) - - for height := int64(1); height <= latestHeight; height++ { - if height < divergenceHeight { - primaryHeaders[height] = witnessHeaders[height] + for _, tc := range cases { + testCase := tc + t.Run(testCase.name, func(t *testing.T) { + // primary performs an equivocation attack + var ( + valSize = 5 + primaryHeaders = make(map[int64]*types.SignedHeader, testCase.latestHeight) + // validators don't change in this network (however we still use a map just for convenience) + primaryValidators = make(map[int64]*types.ValidatorSet, testCase.latestHeight) + ) + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, + testCase.latestHeight+1, valSize, 2, bTime) + for height := int64(1); height <= testCase.latestHeight; height++ { + if height < testCase.divergenceHeight { + primaryHeaders[height] = witnessHeaders[height] + primaryValidators[height] = witnessValidators[height] + continue + } + // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for + // a different block (which we do by adding txs) + primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, + bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, + 
witnessValidators[height], witnessValidators[height+1], hash("app_hash"), + hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) primaryValidators[height] = witnessValidators[height] - continue } - // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for - // a different block (which we do by adding txs) - primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, - bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, - witnessValidators[height], witnessValidators[height+1], hash("app_hash"), - hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) - primaryValidators[height] = witnessValidators[height] - } - primary := mockp.New(chainID, primaryHeaders, primaryValidators) - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 4 * time.Hour, - Height: 1, - Hash: primaryHeaders[1].Hash(), - }, - primary, - []provider.Provider{witness}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - verificationOption, - ) - require.NoError(t, err) + for _, height := range testCase.unusedWitnessBlockHeights { + delete(witnessHeaders, height) + } + mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators) + for _, height := range testCase.unusedPrimaryBlockHeights { + delete(primaryHeaders, height) + } + mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators) - // Check verification returns an error. - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - if assert.Error(t, err) { - assert.Equal(t, light.ErrLightClientAttack, err) - } + // Check evidence was sent to both full nodes. + // Common height should be set to the height of the divergent header in the instance + // of an equivocation attack and the validator sets are the same as what the witness has + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + evAgainstPrimary := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[testCase.divergenceHeight], + ValidatorSet: primaryValidators[testCase.divergenceHeight], + }, + CommonHeight: testCase.divergenceHeight, + } + return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + })).Return(nil) + mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + evAgainstWitness := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: witnessHeaders[testCase.divergenceHeight], + ValidatorSet: witnessValidators[testCase.divergenceHeight], + }, + CommonHeight: testCase.divergenceHeight, + } + return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash()) + })).Return(nil) - // Check evidence was sent to both full nodes. 
- // Common height should be set to the height of the divergent header in the instance - // of an equivocation attack and the validator sets are the same as what the witness has - evAgainstPrimary := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: primaryHeaders[divergenceHeight], - ValidatorSet: primaryValidators[divergenceHeight], - }, - CommonHeight: divergenceHeight, - } - assert.True(t, witness.HasEvidence(evAgainstPrimary)) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: primaryHeaders[1].Hash(), + }, + mockPrimary, + []provider.Provider{mockWitness}, + dbs.New(dbm.NewMemDB()), + light.Logger(log.TestingLogger()), + testCase.lightOption, + ) + require.NoError(t, err) - evAgainstWitness := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: witnessHeaders[divergenceHeight], - ValidatorSet: witnessValidators[divergenceHeight], - }, - CommonHeight: divergenceHeight, - } - assert.True(t, primary.HasEvidence(evAgainstWitness)) + // Check verification returns an error. + _, err = c.VerifyLightBlockAtHeight(ctx, testCase.latestHeight, bTime.Add(300*time.Second)) + if assert.Error(t, err) { + assert.Equal(t, light.ErrLightClientAttack, err) + } + + mockWitness.AssertExpectations(t) + mockPrimary.AssertExpectations(t) + }) } } @@ -182,7 +228,10 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, forgedHeight) ) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime) + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) + for _, unusedHeader := range []int64{3, 5, 6, 8} { + delete(witnessHeaders, unusedHeader) + } // primary has the exact same headers except it forges one extra header in the future using keys from 2/5ths of // the validators @@ -190,6 +239,9 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { primaryHeaders[h] = witnessHeaders[h] primaryValidators[h] = witnessValidators[h] } + for _, unusedHeader := range []int64{3, 5, 6, 8} { + delete(primaryHeaders, unusedHeader) + } forgedKeys := chainKeys[latestHeight].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) primaryValidators[forgedHeight] = forgedKeys.ToValidators(2, 0) primaryHeaders[forgedHeight] = forgedKeys.GenSignedHeader( @@ -204,15 +256,36 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { hash("results_hash"), 0, len(forgedKeys), ) + mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators) + lastBlock, _ := mockPrimary.LightBlock(ctx, forgedHeight) + mockPrimary.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) + mockPrimary.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - witness := mockp.New(chainID, witnessHeaders, witnessValidators) - primary := mockp.New(chainID, primaryHeaders, primaryValidators) + /* + for _, unusedHeader := range []int64{3, 5, 6, 8} { + delete(witnessHeaders, unusedHeader) + } + */ + mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators) + lastBlock, _ = mockWitness.LightBlock(ctx, latestHeight) + mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil).Once() + mockWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh) - laggingWitness := 
witness.Copy("laggingWitness") + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + // Check evidence was sent to the witness against the full node + evAgainstPrimary := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[forgedHeight], + ValidatorSet: primaryValidators[forgedHeight], + }, + CommonHeight: latestHeight, + } + return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + })).Return(nil).Twice() // In order to perform the attack, the primary needs at least one accomplice as a witness to also // send the forged block - accomplice := primary + accomplice := mockPrimary c, err := light.NewClient( ctx, @@ -222,8 +295,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { Height: 1, Hash: primaryHeaders[1].Hash(), }, - primary, - []provider.Provider{witness, accomplice}, + mockPrimary, + []provider.Provider{mockWitness, accomplice}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), light.MaxClockDrift(1*time.Second), @@ -251,7 +324,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { } go func() { time.Sleep(2 * time.Second) - witness.AddLightBlock(newLb) + mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(newLb, nil) }() // Now assert that verification returns an error. We craft the light clients time to be a little ahead of the chain @@ -261,26 +334,19 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { assert.Equal(t, light.ErrLightClientAttack, err) } - // Check evidence was sent to the witness against the full node - evAgainstPrimary := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: primaryHeaders[forgedHeight], - ValidatorSet: primaryValidators[forgedHeight], - }, - CommonHeight: latestHeight, - } - assert.True(t, witness.HasEvidence(evAgainstPrimary)) - // We attempt the same call but now the supporting witness has a block which should // immediately conflict in time with the primary _, err = c.VerifyLightBlockAtHeight(ctx, forgedHeight, bTime.Add(time.Duration(forgedHeight)*time.Minute)) if assert.Error(t, err) { assert.Equal(t, light.ErrLightClientAttack, err) } - assert.True(t, witness.HasEvidence(evAgainstPrimary)) // Lastly we test the unfortunate case where the light clients supporting witness doesn't update // in enough time + mockLaggingWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators) + mockLaggingWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh) + lastBlock, _ = mockLaggingWitness.LightBlock(ctx, latestHeight) + mockLaggingWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) c, err = light.NewClient( ctx, chainID, @@ -289,8 +355,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { Height: 1, Hash: primaryHeaders[1].Hash(), }, - primary, - []provider.Provider{laggingWitness, accomplice}, + mockPrimary, + []provider.Provider{mockLaggingWitness, accomplice}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), light.MaxClockDrift(1*time.Second), @@ -300,17 +366,20 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { _, err = c.Update(ctx, bTime.Add(time.Duration(forgedHeight)*time.Minute)) assert.NoError(t, err) - + mockPrimary.AssertExpectations(t) + mockWitness.AssertExpectations(t) } // 1. Different nodes therefore a divergent header is produced. 
// => light client returns an error upon creation because primary and witness // have a different view. func TestClientDivergentTraces1(t *testing.T) { - primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) - firstBlock, err := primary.LightBlock(ctx, 1) + headers, vals, _ := genLightBlocksWithKeys(chainID, 1, 5, 2, bTime) + mockPrimary := mockNodeFromHeadersAndVals(headers, vals) + firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - witness := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + headers, vals, _ = genLightBlocksWithKeys(chainID, 1, 5, 2, bTime) + mockWitness := mockNodeFromHeadersAndVals(headers, vals) _, err = light.NewClient( ctx, @@ -320,20 +389,25 @@ func TestClientDivergentTraces1(t *testing.T) { Hash: firstBlock.Hash(), Period: 4 * time.Hour, }, - primary, - []provider.Provider{witness}, + mockPrimary, + []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) require.Error(t, err) assert.Contains(t, err.Error(), "does not match primary") + mockWitness.AssertExpectations(t) + mockPrimary.AssertExpectations(t) } // 2. Two out of three nodes don't respond but the third has a header that matches // => verification should be successful and all the witnesses should remain func TestClientDivergentTraces2(t *testing.T) { - primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) - firstBlock, err := primary.LightBlock(ctx, 1) + headers, vals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + mockPrimaryNode := mockNodeFromHeadersAndVals(headers, vals) + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) + firstBlock, err := mockPrimaryNode.LightBlock(ctx, 1) require.NoError(t, err) c, err := light.NewClient( ctx, @@ -343,31 +417,35 @@ func TestClientDivergentTraces2(t *testing.T) { Hash: firstBlock.Hash(), Period: 4 * time.Hour, }, - primary, - []provider.Provider{deadNode, deadNode, primary}, + mockPrimaryNode, + []provider.Provider{mockDeadNode, mockDeadNode, mockPrimaryNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) require.NoError(t, err) - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) assert.NoError(t, err) assert.Equal(t, 3, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockPrimaryNode.AssertExpectations(t) } // 3. 
witness has the same first header, but different second header // => creation should succeed, but the verification should fail +//nolint: dupl func TestClientDivergentTraces3(t *testing.T) { - _, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime) - primary := mockp.New(chainID, primaryHeaders, primaryVals) + // + primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) - firstBlock, err := primary.LightBlock(ctx, 1) + firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - _, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime) + mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) mockHeaders[1] = primaryHeaders[1] mockVals[1] = primaryVals[1] - witness := mockp.New(chainID, mockHeaders, mockVals) + mockWitness := mockNodeFromHeadersAndVals(mockHeaders, mockVals) c, err := light.NewClient( ctx, @@ -377,33 +455,35 @@ func TestClientDivergentTraces3(t *testing.T) { Hash: firstBlock.Hash(), Period: 4 * time.Hour, }, - primary, - []provider.Provider{witness}, + mockPrimary, + []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) require.NoError(t, err) - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) assert.Error(t, err) assert.Equal(t, 1, len(c.Witnesses())) + mockWitness.AssertExpectations(t) + mockPrimary.AssertExpectations(t) } // 4. Witness has a divergent header but can not produce a valid trace to back it up. // It should be ignored +//nolint: dupl func TestClientDivergentTraces4(t *testing.T) { - _, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime) - primary := mockp.New(chainID, primaryHeaders, primaryVals) + // + primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) - firstBlock, err := primary.LightBlock(ctx, 1) + firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - _, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime) - witness := primary.Copy("witness") - witness.AddLightBlock(&types.LightBlock{ - SignedHeader: mockHeaders[10], - ValidatorSet: mockVals[10], - }) + witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + primaryHeaders[2] = witnessHeaders[2] + primaryVals[2] = witnessVals[2] + mockWitness := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) c, err := light.NewClient( ctx, @@ -413,14 +493,16 @@ func TestClientDivergentTraces4(t *testing.T) { Hash: firstBlock.Hash(), Period: 4 * time.Hour, }, - primary, - []provider.Provider{witness}, + mockPrimary, + []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) require.NoError(t, err) - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) assert.Error(t, err) assert.Equal(t, 1, len(c.Witnesses())) + mockWitness.AssertExpectations(t) + mockPrimary.AssertExpectations(t) } diff --git a/light/example_test.go b/light/example_test.go index 0fd6640c4..2e0feb5e1 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "fmt" "io/ioutil" stdlog "log" "os" @@ -19,87 +18,12 @@ import ( rpctest "github.com/tendermint/tendermint/rpc/test" ) -// Automatically getting 
new headers and verifying them. -func ExampleClient_Update() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - conf := rpctest.CreateConfig("ExampleClient_Update") - - // Start a test application - app := kvstore.NewApplication() - _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) - if err != nil { - stdlog.Fatal(err) - } - defer func() { _ = closer(ctx) }() - - // give Tendermint time to generate some blocks - time.Sleep(5 * time.Second) - - dbDir, err := ioutil.TempDir("", "light-client-example") - if err != nil { - stdlog.Fatal(err) - } - defer os.RemoveAll(dbDir) - - chainID := conf.ChainID() - - primary, err := httpp.New(chainID, conf.RPC.ListenAddress) - if err != nil { - stdlog.Fatal(err) - } - - block, err := primary.LightBlock(ctx, 2) - if err != nil { - stdlog.Fatal(err) - } - - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) - if err != nil { - stdlog.Fatal(err) - } - - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 504 * time.Hour, // 21 days - Height: 2, - Hash: block.Hash(), - }, - primary, - []provider.Provider{primary}, // NOTE: primary should not be used here - dbs.New(db), - light.Logger(log.TestingLogger()), - ) - if err != nil { - stdlog.Fatal(err) - } - defer func() { - if err := c.Cleanup(); err != nil { - stdlog.Fatal(err) - } - }() - - time.Sleep(2 * time.Second) - - h, err := c.Update(ctx, time.Now()) - if err != nil { - stdlog.Fatal(err) - } - - if h != nil && h.Height > 2 { - fmt.Println("successful update") - } else { - fmt.Println("update failed") - } -} - // Manually getting light blocks and verifying them. -func ExampleClient_VerifyLightBlockAtHeight() { +func ExampleClient() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + logger := log.TestingLogger() // Start a test application app := kvstore.NewApplication() @@ -110,9 +34,6 @@ func ExampleClient_VerifyLightBlockAtHeight() { } defer func() { _ = closer(ctx) }() - // give Tendermint time to generate some blocks - time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-example") if err != nil { stdlog.Fatal(err) @@ -126,6 +47,9 @@ func ExampleClient_VerifyLightBlockAtHeight() { stdlog.Fatal(err) } + // give Tendermint time to generate some blocks + time.Sleep(5 * time.Second) + block, err := primary.LightBlock(ctx, 2) if err != nil { stdlog.Fatal(err) @@ -146,7 +70,7 @@ func ExampleClient_VerifyLightBlockAtHeight() { primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) if err != nil { stdlog.Fatal(err) @@ -157,15 +81,26 @@ func ExampleClient_VerifyLightBlockAtHeight() { } }() + // wait for a few more blocks to be produced + time.Sleep(2 * time.Second) + + // veify the block at height 3 _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now()) if err != nil { stdlog.Fatal(err) } - h, err := c.TrustedLightBlock(3) + // retrieve light block at height 3 + _, err = c.TrustedLightBlock(3) if err != nil { stdlog.Fatal(err) } - fmt.Println("got header", h.Height) + // update to the latest height + lb, err := c.Update(ctx, time.Now()) + if err != nil { + stdlog.Fatal(err) + } + + logger.Info("verified light block", "light-block", lb) } diff --git a/light/helpers_test.go b/light/helpers_test.go index 2ca951913..1d25f9166 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -3,10 
+3,12 @@ package light_test import ( "time" + "github.com/stretchr/testify/mock" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" tmtime "github.com/tendermint/tendermint/libs/time" + provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" @@ -169,12 +171,12 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys { return newKeys.Extend(delta) } -// Generates the header and validator set to create a full entire mock node with blocks to height ( -// blockSize) and with variation in validator sets. BlockIntervals are in per minute. +// genLightBlocksWithKeys generates the header and validator set to create +// blocks to height. BlockIntervals are in per minute. // NOTE: Expected to have a large validator set size ~ 100 validators. -func genMockNodeWithKeys( +func genLightBlocksWithKeys( chainID string, - blockSize int64, + numBlocks int64, valSize int, valVariation float32, bTime time.Time) ( @@ -183,9 +185,9 @@ func genMockNodeWithKeys( map[int64]privKeys) { var ( - headers = make(map[int64]*types.SignedHeader, blockSize) - valset = make(map[int64]*types.ValidatorSet, blockSize+1) - keymap = make(map[int64]privKeys, blockSize+1) + headers = make(map[int64]*types.SignedHeader, numBlocks) + valset = make(map[int64]*types.ValidatorSet, numBlocks+1) + keymap = make(map[int64]privKeys, numBlocks+1) keys = genPrivKeys(valSize) totalVariation = valVariation valVariationInt int @@ -207,7 +209,7 @@ func genMockNodeWithKeys( valset[1] = keys.ToValidators(2, 0) keys = newKeys - for height := int64(2); height <= blockSize; height++ { + for height := int64(2); height <= numBlocks; height++ { totalVariation += valVariation valVariationInt = int(totalVariation) totalVariation = -float32(valVariationInt) @@ -226,17 +228,14 @@ func genMockNodeWithKeys( return headers, valset, keymap } -func genMockNode( - chainID string, - blockSize int64, - valSize int, - valVariation float32, - bTime time.Time) ( - string, - map[int64]*types.SignedHeader, - map[int64]*types.ValidatorSet) { - headers, valset, _ := genMockNodeWithKeys(chainID, blockSize, valSize, valVariation, bTime) - return chainID, headers, valset +func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader, + vals map[int64]*types.ValidatorSet) *provider_mocks.Provider { + mockNode := &provider_mocks.Provider{} + for i, header := range headers { + lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]} + mockNode.On("LightBlock", mock.Anything, i).Return(lb, nil) + } + return mockNode } func hash(s string) []byte { diff --git a/light/light_test.go b/light/light_test.go index a224d71b2..f5d2ddd89 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -17,6 +17,7 @@ import ( httpp "github.com/tendermint/tendermint/light/provider/http" dbs "github.com/tendermint/tendermint/light/store/db" rpctest "github.com/tendermint/tendermint/rpc/test" + "github.com/tendermint/tendermint/types" ) // NOTE: these are ports of the tests from example_test.go but @@ -48,7 +49,8 @@ func TestClientIntegration_Update(t *testing.T) { primary, err := httpp.New(chainID, conf.RPC.ListenAddress) require.NoError(t, err) - block, err := primary.LightBlock(ctx, 2) + // give Tendermint time to generate some blocks + block, err := waitForBlock(ctx, primary, 2) require.NoError(t, err) db, err := 
dbm.NewGoLevelDB("light-client-db", dbDir) @@ -71,7 +73,9 @@ func TestClientIntegration_Update(t *testing.T) { defer func() { require.NoError(t, c.Cleanup()) }() - time.Sleep(2 * time.Second) + // ensure Tendermint is at height 3 or higher + _, err = waitForBlock(ctx, primary, 3) + require.NoError(t, err) h, err := c.Update(ctx, time.Now()) require.NoError(t, err) @@ -94,9 +98,6 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - // give Tendermint time to generate some blocks - time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-test-verify-example") require.NoError(t, err) defer os.RemoveAll(dbDir) @@ -106,7 +107,8 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { primary, err := httpp.New(chainID, conf.RPC.ListenAddress) require.NoError(t, err) - block, err := primary.LightBlock(ctx, 2) + // give Tendermint time to generate some blocks + block, err := waitForBlock(ctx, primary, 2) require.NoError(t, err) db, err := dbm.NewGoLevelDB("light-client-db", dbDir) @@ -128,6 +130,10 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { defer func() { require.NoError(t, c.Cleanup()) }() + // ensure Tendermint is at height 3 or higher + _, err = waitForBlock(ctx, primary, 3) + require.NoError(t, err) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now()) require.NoError(t, err) @@ -136,3 +142,23 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.EqualValues(t, 3, h.Height) } + +func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*types.LightBlock, error) { + for { + block, err := p.LightBlock(ctx, height) + switch err { + case nil: + return block, nil + // node isn't running yet, wait 1 second and repeat + case provider.ErrNoResponse, provider.ErrHeightTooHigh: + timer := time.NewTimer(1 * time.Second) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-timer.C: + } + default: + return nil, err + } + } +} diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go deleted file mode 100644 index 6045e45f6..000000000 --- a/light/provider/mock/deadmock.go +++ /dev/null @@ -1,30 +0,0 @@ -package mock - -import ( - "context" - "fmt" - - "github.com/tendermint/tendermint/light/provider" - "github.com/tendermint/tendermint/types" -) - -type deadMock struct { - id string -} - -// NewDeadMock creates a mock provider that always errors. id is used in case of multiple providers. 
-func NewDeadMock(id string) provider.Provider { - return &deadMock{id: id} -} - -func (p *deadMock) String() string { - return fmt.Sprintf("DeadMock-%s", p.id) -} - -func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { - return nil, provider.ErrNoResponse -} - -func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error { - return provider.ErrNoResponse -} diff --git a/light/provider/mock/mock.go b/light/provider/mock/mock.go deleted file mode 100644 index fcb8a6fa4..000000000 --- a/light/provider/mock/mock.go +++ /dev/null @@ -1,125 +0,0 @@ -package mock - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/tendermint/tendermint/light/provider" - "github.com/tendermint/tendermint/types" -) - -type Mock struct { - id string - - mtx sync.Mutex - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet - evidenceToReport map[string]types.Evidence // hash => evidence - latestHeight int64 -} - -var _ provider.Provider = (*Mock)(nil) - -// New creates a mock provider with the given set of headers and validator -// sets. -func New(id string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock { - height := int64(0) - for h := range headers { - if h > height { - height = h - } - } - return &Mock{ - id: id, - headers: headers, - vals: vals, - evidenceToReport: make(map[string]types.Evidence), - latestHeight: height, - } -} - -func (p *Mock) String() string { - var headers strings.Builder - for _, h := range p.headers { - fmt.Fprintf(&headers, " %d:%X", h.Height, h.Hash()) - } - - var vals strings.Builder - for _, v := range p.vals { - fmt.Fprintf(&vals, " %X", v.Hash()) - } - - return fmt.Sprintf("Mock{id: %s, headers: %s, vals: %v}", p.id, headers.String(), vals.String()) -} - -func (p *Mock) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { - p.mtx.Lock() - defer p.mtx.Unlock() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(10 * time.Millisecond): - } - - var lb *types.LightBlock - - if height > p.latestHeight { - return nil, provider.ErrHeightTooHigh - } - - if height == 0 && len(p.headers) > 0 { - height = p.latestHeight - } - - if _, ok := p.headers[height]; ok { - sh := p.headers[height] - vals := p.vals[height] - lb = &types.LightBlock{ - SignedHeader: sh, - ValidatorSet: vals, - } - } - if lb == nil { - return nil, provider.ErrLightBlockNotFound - } - if lb.SignedHeader == nil || lb.ValidatorSet == nil { - return nil, provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")} - } - if err := lb.ValidateBasic(lb.ChainID); err != nil { - return nil, provider.ErrBadLightBlock{Reason: err} - } - return lb, nil -} - -func (p *Mock) ReportEvidence(_ context.Context, ev types.Evidence) error { - p.evidenceToReport[string(ev.Hash())] = ev - return nil -} - -func (p *Mock) HasEvidence(ev types.Evidence) bool { - _, ok := p.evidenceToReport[string(ev.Hash())] - return ok -} - -func (p *Mock) AddLightBlock(lb *types.LightBlock) { - p.mtx.Lock() - defer p.mtx.Unlock() - - if err := lb.ValidateBasic(lb.ChainID); err != nil { - panic(fmt.Sprintf("unable to add light block, err: %v", err)) - } - p.headers[lb.Height] = lb.SignedHeader - p.vals[lb.Height] = lb.ValidatorSet - if lb.Height > p.latestHeight { - p.latestHeight = lb.Height - } -} - -func (p *Mock) Copy(id string) *Mock { - return New(id, p.headers, p.vals) -} diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go new 
file mode 100644 index 000000000..aa36fa2d3 --- /dev/null +++ b/light/provider/mocks/provider.go @@ -0,0 +1,53 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/types" +) + +// Provider is an autogenerated mock type for the Provider type +type Provider struct { + mock.Mock +} + +// LightBlock provides a mock function with given fields: ctx, height +func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { + ret := _m.Called(ctx, height) + + var r0 *types.LightBlock + if rf, ok := ret.Get(0).(func(context.Context, int64) *types.LightBlock); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.LightBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReportEvidence provides a mock function with given fields: _a0, _a1 +func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/light/provider/provider.go b/light/provider/provider.go index 14b397543..7f15d5c75 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -6,7 +6,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name Provider +//go:generate ../../scripts/mockery_generate.sh Provider // Provider provides information for the light client to sync (verification // happens in the client). diff --git a/light/rpc/client.go b/light/rpc/client.go index d8b1e954f..48cf7ce73 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -25,7 +25,7 @@ import ( type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error) // LightClient is an interface that contains functionality needed by Client from the light client. -//go:generate mockery --case underscore --name LightClient +//go:generate ../../scripts/mockery_generate.sh LightClient type LightClient interface { ChainID() string Update(ctx context.Context, now time.Time) (*types.LightBlock, error) diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 644a18a20..cc32cf649 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
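For context, a minimal sketch (not part of this patch) of how the mockery-generated provider mock is wired in these tests, in the same style as the mockNodeFromHeadersAndVals helper above; the newMockWitness name and the fixture block are hypothetical placeholders:

// light/provider_mock_sketch_test.go (illustrative only)
package light_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/tendermint/tendermint/light/provider"
	provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
	"github.com/tendermint/tendermint/types"
)

// newMockWitness serves a single fixture block and rejects every other height.
func newMockWitness(t *testing.T, lb *types.LightBlock) *provider_mocks.Provider {
	t.Helper()
	witness := &provider_mocks.Provider{}
	// Return the fixture for its own height and for height 0 ("latest").
	witness.On("LightBlock", mock.Anything, lb.Height).Return(lb, nil)
	witness.On("LightBlock", mock.Anything, int64(0)).Return(lb, nil)
	// Any other height is unknown to this witness.
	witness.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
	// Tests typically finish with witness.AssertExpectations(t).
	return witness
}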
package mocks diff --git a/node/node.go b/node/node.go index cb32c5ce6..751c78889 100644 --- a/node/node.go +++ b/node/node.go @@ -18,7 +18,6 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cs "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" @@ -37,7 +36,6 @@ import ( grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -66,21 +64,17 @@ type nodeImpl struct { eventBus *types.EventBus // pub/sub for services stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk - bcReactor service.Service // for fast-syncing + bcReactor service.Service // for block-syncing mempoolReactor service.Service // for gossipping transactions mempool mempool.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusState *cs.State // latest consensus state consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - pexReactorV2 *pex.ReactorV2 // for exchanging peer addresses - evidenceReactor *evidence.Reactor - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application + pexReactor service.Service // for exchanging peer addresses + evidenceReactor service.Service rpcListeners []net.Listener // rpc servers - eventSinks []indexer.EventSink - indexerService *indexer.Service + indexerService service.Service + rpcEnv *rpccore.Environment prometheusSrv *http.Server } @@ -225,9 +219,9 @@ func makeNode(config *cfg.Config, } } - // Determine whether we should do fast sync. This must happen after the handshake, since the + // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. - fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + blockSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode) @@ -281,15 +275,15 @@ func makeNode(config *cfg.Config, csReactorShim, csReactor, csState := createConsensusReactor( config, state, blockExec, blockStore, mp, evPool, - privValidator, csMetrics, stateSync || fastSync, eventBus, + privValidator, csMetrics, stateSync || blockSync, eventBus, peerManager, router, consensusLogger, ) - // Create the blockchain reactor. Note, we do not start fast sync if we're + // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. bcReactorShim, bcReactor, err := createBlockchainReactor( logger, config, state, blockExec, blockStore, csReactor, - peerManager, router, fastSync && !stateSync, csMetrics, + peerManager, router, blockSync && !stateSync, csMetrics, ) if err != nil { return nil, fmt.Errorf("could not create blockchain reactor: %w", err) @@ -303,16 +297,16 @@ func makeNode(config *cfg.Config, bcReactorForSwitch = bcReactor.(p2p.Reactor) } - // Make ConsensusReactor. 
Don't enable fully if doing a state sync and/or fast sync first. + // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { csMetrics.StateSyncing.Set(1) - } else if fastSync { - csMetrics.FastSyncing.Set(1) + } else if blockSync { + csMetrics.BlockSyncing.Set(1) } // Set up state sync reactor, and schedule a sync if requested. - // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 var ( @@ -371,10 +365,9 @@ func makeNode(config *cfg.Config, // Note we currently use the addrBook regardless at least for AddOurAddress var ( - pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 - sw *p2p.Switch - addrBook pex.AddrBook + pexReactor service.Service + sw *p2p.Switch + addrBook pex.AddrBook ) pexCh := pex.ChannelDescriptor() @@ -382,7 +375,7 @@ func makeNode(config *cfg.Config, if config.P2P.DisableLegacy { addrBook = nil - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) + pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) if err != nil { return nil, err } @@ -436,19 +429,37 @@ func makeNode(config *cfg.Config, bcReactor: bcReactor, mempoolReactor: mpReactor, mempool: mp, - consensusState: csState, consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, pexReactor: pexReactor, - pexReactorV2: pexReactorV2, evidenceReactor: evReactor, - evidencePool: evPool, - proxyApp: proxyApp, indexerService: indexerService, eventBus: eventBus, - eventSinks: eventSinks, + + rpcEnv: &rpccore.Environment{ + ProxyAppQuery: proxyApp.Query(), + ProxyAppMempool: proxyApp.Mempool(), + + StateStore: stateStore, + BlockStore: blockStore, + EvidencePool: evPool, + ConsensusState: csState, + P2PPeers: sw, + BlockSyncReactor: bcReactor.(cs.BlockSyncReactor), + + GenDoc: genDoc, + EventSinks: eventSinks, + ConsensusReactor: csReactor, + EventBus: eventBus, + Mempool: mp, + Logger: logger.With("module", "rpc"), + Config: *config.RPC, + }, } + + node.rpcEnv.P2PTransport = node + node.BaseService = *service.NewBaseService(logger, "Node", node) return node, nil @@ -481,25 +492,6 @@ func makeSeedNode(config *cfg.Config, p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID) p2pLogger := logger.With("module", "p2p") transport := createTransport(p2pLogger, config) - sw := createSwitch( - config, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) if err != nil { @@ -513,8 +505,9 @@ func makeSeedNode(config *cfg.Config, } var ( - 
pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 + pexReactor service.Service + sw *p2p.Switch + addrBook pex.AddrBook ) // add the pex reactor @@ -524,11 +517,31 @@ func makeSeedNode(config *cfg.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) if config.P2P.DisableLegacy { - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) + pexReactor, err = createPEXReactorV2(config, logger, peerManager, router) if err != nil { return nil, err } } else { + sw = createSwitch( + config, transport, p2pMetrics, nil, nil, + nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, + ) + + err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + } + + err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } + + addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } @@ -551,8 +564,7 @@ func makeSeedNode(config *cfg.Config, peerManager: peerManager, router: router, - pexReactor: pexReactor, - pexReactorV2: pexReactorV2, + pexReactor: pexReactor, } node.BaseService = *service.NewBaseService(logger, "SeedNode", node) @@ -593,23 +605,22 @@ func (n *nodeImpl) OnStart() error { } n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", !n.config.P2P.DisableLegacy) if n.config.P2P.DisableLegacy { - err = n.router.Start() + if err = n.router.Start(); err != nil { + return err + } } else { // Add private IDs to addrbook to block those peers being added n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - err = n.sw.Start() - } - if err != nil { - return err + if err = n.sw.Start(); err != nil { + return err + } } if n.config.Mode != cfg.ModeSeed { - if n.config.FastSync.Version == cfg.BlockchainV0 { - // Start the real blockchain reactor separately since the switch uses the shim. + if n.config.BlockSync.Version == cfg.BlockSyncV0 { if err := n.bcReactor.Start(); err != nil { return err } @@ -636,8 +647,8 @@ func (n *nodeImpl) OnStart() error { } } - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Start(); err != nil { + if n.config.P2P.DisableLegacy { + if err := n.pexReactor.Start(); err != nil { return err } } else { @@ -646,12 +657,11 @@ func (n *nodeImpl) OnStart() error { if err != nil { return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) } - } // Run state sync if n.stateSync { - bcR, ok := n.bcReactor.(cs.FastSyncReactor) + bcR, ok := n.bcReactor.(cs.BlockSyncReactor) if !ok { return fmt.Errorf("this blockchain reactor does not support switching from state sync") } @@ -693,7 +703,7 @@ func (n *nodeImpl) OnStop() { if n.config.Mode != cfg.ModeSeed { // now stop the reactors - if n.config.FastSync.Version == cfg.BlockchainV0 { + if n.config.BlockSync.Version == cfg.BlockSyncV0 { // Stop the real blockchain reactor separately since the switch uses the shim. 
if err := n.bcReactor.Stop(); err != nil { n.Logger.Error("failed to stop the blockchain reactor", "err", err) @@ -721,10 +731,8 @@ func (n *nodeImpl) OnStop() { } } - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) - } + if err := n.pexReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } if n.config.P2P.DisableLegacy { @@ -765,55 +773,23 @@ func (n *nodeImpl) OnStop() { } } -// ConfigureRPC makes sure RPC has all the objects it needs to operate. -func (n *nodeImpl) ConfigureRPC() (*rpccore.Environment, error) { - rpcCoreEnv := rpccore.Environment{ - ProxyAppQuery: n.proxyApp.Query(), - ProxyAppMempool: n.proxyApp.Mempool(), - - StateStore: n.stateStore, - BlockStore: n.blockStore, - EvidencePool: n.evidencePool, - ConsensusState: n.consensusState, - P2PPeers: n.sw, - P2PTransport: n, - - GenDoc: n.genesisDoc, - EventSinks: n.eventSinks, - ConsensusReactor: n.consensusReactor, - EventBus: n.eventBus, - Mempool: n.mempool, - - Logger: n.Logger.With("module", "rpc"), - - Config: *n.config.RPC, - FastSyncReactor: n.bcReactor.(cs.FastSyncReactor), - } +func (n *nodeImpl) startRPC() ([]net.Listener, error) { if n.config.Mode == cfg.ModeValidator { pubKey, err := n.privValidator.GetPubKey(context.TODO()) if pubKey == nil || err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } - rpcCoreEnv.PubKey = pubKey + n.rpcEnv.PubKey = pubKey } - if err := rpcCoreEnv.InitGenesisChunks(); err != nil { - return nil, err - } - - return &rpcCoreEnv, nil -} - -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - env, err := n.ConfigureRPC() - if err != nil { + if err := n.rpcEnv.InitGenesisChunks(); err != nil { return nil, err } listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - routes := env.GetRoutes() + routes := n.rpcEnv.GetRoutes() if n.config.RPC.Unsafe { - env.AddUnsafe(routes) + n.rpcEnv.AddUnsafe(routes) } config := rpcserver.DefaultConfig() @@ -910,7 +886,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { return nil, err } go func() { - if err := grpccore.StartGRPCServer(env, listener); err != nil { + if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil { n.Logger.Error("Error starting gRPC server", "err", err) } }() @@ -943,46 +919,16 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { return srv } -// Switch returns the Node's Switch. -func (n *nodeImpl) Switch() *p2p.Switch { - return n.sw -} - -// BlockStore returns the Node's BlockStore. -func (n *nodeImpl) BlockStore() *store.BlockStore { - return n.blockStore -} - -// ConsensusState returns the Node's ConsensusState. -func (n *nodeImpl) ConsensusState() *cs.State { - return n.consensusState -} - // ConsensusReactor returns the Node's ConsensusReactor. func (n *nodeImpl) ConsensusReactor() *cs.Reactor { return n.consensusReactor } -// MempoolReactor returns the Node's mempool reactor. -func (n *nodeImpl) MempoolReactor() service.Service { - return n.mempoolReactor -} - // Mempool returns the Node's mempool. func (n *nodeImpl) Mempool() mempool.Mempool { return n.mempool } -// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. -func (n *nodeImpl) PEXReactor() *pex.Reactor { - return n.pexReactor -} - -// EvidencePool returns the Node's EvidencePool. 
-func (n *nodeImpl) EvidencePool() *evidence.Pool { - return n.evidencePool -} - // EventBus returns the Node's EventBus. func (n *nodeImpl) EventBus() *types.EventBus { return n.eventBus @@ -999,19 +945,9 @@ func (n *nodeImpl) GenesisDoc() *types.GenesisDoc { return n.genesisDoc } -// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. -func (n *nodeImpl) ProxyApp() proxy.AppConns { - return n.proxyApp -} - -// Config returns the Node's config. -func (n *nodeImpl) Config() *cfg.Config { - return n.config -} - -// EventSinks returns the Node's event indexing sinks. -func (n *nodeImpl) EventSinks() []indexer.EventSink { - return n.eventSinks +// RPCEnvironment makes sure RPC has all the objects it needs to operate. +func (n *nodeImpl) RPCEnvironment() *rpccore.Environment { + return n.rpcEnv } //------------------------------------------------------------------------------ @@ -1031,14 +967,14 @@ func (n *nodeImpl) NodeInfo() types.NodeInfo { return n.nodeInfo } -// startStateSync starts an asynchronous state sync process, then switches to fast sync mode. +// startStateSync starts an asynchronous state sync process, then switches to block sync mode. func startStateSync( ssR statesync.SyncReactor, - bcR cs.FastSyncReactor, + bcR cs.BlockSyncReactor, conR cs.ConsSyncReactor, sp statesync.StateProvider, config *cfg.StateSyncConfig, - fastSync bool, + blockSync bool, stateInitHeight int64, eb *types.EventBus, ) error { @@ -1072,17 +1008,17 @@ func startStateSync( stateSyncLogger.Error("failed to emit the statesync start event", "err", err) } - if fastSync { + if blockSync { // FIXME Very ugly to have these metrics bleed through here. - conR.SetFastSyncingMetrics(1) - if err := bcR.SwitchToFastSync(state); err != nil { - stateSyncLogger.Error("failed to switch to fast sync", "err", err) + conR.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(state); err != nil { + stateSyncLogger.Error("failed to switch to block sync", "err", err) return } - d := types.EventDataFastSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := eb.PublishEventFastSyncStatus(d); err != nil { - stateSyncLogger.Error("failed to emit the fastsync starting event", "err", err) + d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} + if err := eb.PublishEventBlockSyncStatus(d); err != nil { + stateSyncLogger.Error("failed to emit the block sync starting event", "err", err) } } else { diff --git a/node/node_test.go b/node/node_test.go index eca622bd3..64b28c0bb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -513,36 +513,50 @@ func TestNodeSetEventSink(t *testing.T) { config := cfg.ResetTestRoot("node_app_version_test") defer os.RemoveAll(config.RootDir) - n := getTestNode(t, config, log.TestingLogger()) + logger := log.TestingLogger() + setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink { + eventBus, err := createAndStartEventBus(logger) + require.NoError(t, err) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.KV, n.eventSinks[0].Type()) + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + require.NoError(t, err) + + indexService, eventSinks, err := createAndStartIndexerService(config, + cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) + return eventSinks + } + + eventSinks := setupTest(t, config) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.KV, 
eventSinks[0].Type()) config.TxIndex.Indexer = []string{"null"} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"null", "kv"} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"kvv"} - ns, err := newDefaultNode(config, log.TestingLogger()) + ns, err := newDefaultNode(config, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("unsupported event sink type"), err) config.TxIndex.Indexer = []string{} - n = getTestNode(t, config, log.TestingLogger()) + eventSinks = setupTest(t, config) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.NULL, n.eventSinks[0].Type()) + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.NULL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"psql"} - ns, err = newDefaultNode(config, log.TestingLogger()) + ns, err = newDefaultNode(config, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) @@ -550,46 +564,46 @@ func TestNodeSetEventSink(t *testing.T) { config.TxIndex.Indexer = []string{"psql"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 1, len(n.eventSinks)) - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - n.OnStop() + eventSinks = setupTest(t, config) + + assert.Equal(t, 1, len(eventSinks)) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) config.TxIndex.Indexer = []string{"psql", "kv"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 2, len(n.eventSinks)) + eventSinks = setupTest(t, config) + + assert.Equal(t, 2, len(eventSinks)) // we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks. 
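A minimal sketch (not part of this patch) of the tx-index settings that the rewritten TestNodeSetEventSink exercises; the package name, helper name, and PostgreSQL DSN below are placeholders:

// node/event_sink_config_sketch.go (illustrative only)
package example

import (
	"os"

	cfg "github.com/tendermint/tendermint/config"
)

// eventSinkConfig sketches a tx-index configuration accepted by the node.
func eventSinkConfig() (*cfg.Config, func()) {
	conf := cfg.ResetTestRoot("event_sink_example")
	cleanup := func() { _ = os.RemoveAll(conf.RootDir) }

	// Only the null sink survives when "null" is combined with other sinks,
	// and mixed-case duplicates such as "kv" and "Kv" are rejected outright,
	// so a typical multi-sink setup names each sink exactly once. The "psql"
	// sink additionally requires a non-empty connection string.
	conf.TxIndex.Indexer = []string{"kv", "psql"}
	conf.TxIndex.PsqlConn = "postgresql://user:pass@localhost/tendermint?sslmode=disable"

	return conf, cleanup
}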
- if n.eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type()) + if eventSinks[0].Type() == indexer.KV { + assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) } else { - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - assert.Equal(t, indexer.KV, n.eventSinks[1].Type()) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) + assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - n.OnStop() config.TxIndex.Indexer = []string{"kv", "psql"} config.TxIndex.PsqlConn = psqlConn - n = getTestNode(t, config, log.TestingLogger()) - assert.Equal(t, 2, len(n.eventSinks)) - if n.eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type()) + eventSinks = setupTest(t, config) + + assert.Equal(t, 2, len(eventSinks)) + if eventSinks[0].Type() == indexer.KV { + assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) } else { - assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type()) - assert.Equal(t, indexer.KV, n.eventSinks[1].Type()) + assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) + assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - n.OnStop() var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") config.TxIndex.Indexer = []string{"psql", "kv", "Kv"} config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, log.TestingLogger()) + _, err = newDefaultNode(config, logger) require.Error(t, err) assert.Equal(t, e, err) config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, log.TestingLogger()) + _, err = newDefaultNode(config, logger) require.Error(t, err) assert.Equal(t, e, err) } @@ -659,7 +673,7 @@ func loadStatefromGenesis(t *testing.T) sm.State { func TestNodeStartStateSync(t *testing.T) { mockSSR := &statesync.MockSyncReactor{} - mockFSR := &consmocks.FastSyncReactor{} + mockFSR := &consmocks.BlockSyncReactor{} mockCSR := &consmocks.ConsSyncReactor{} mockSP := &ssmocks.StateProvider{} state := loadStatefromGenesis(t) diff --git a/node/setup.go b/node/setup.go index cd1875fb6..ceadcd688 100644 --- a/node/setup.go +++ b/node/setup.go @@ -16,8 +16,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - bcv0 "github.com/tendermint/tendermint/internal/blockchain/v0" - bcv2 "github.com/tendermint/tendermint/internal/blockchain/v2" + bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" + bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2" cs "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" @@ -337,14 +337,14 @@ func createBlockchainReactor( csReactor *cs.Reactor, peerManager *p2p.PeerManager, router *p2p.Router, - fastSync bool, + blockSync bool, metrics *cs.Metrics, ) (*p2p.ReactorShim, service.Service, error) { logger = logger.With("module", "blockchain") - switch config.FastSync.Version { - case cfg.BlockchainV0: + switch config.BlockSync.Version { + case cfg.BlockSyncV0: reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) var ( @@ -362,7 +362,7 @@ func createBlockchainReactor( reactor, err := bcv0.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, - channels[bcv0.BlockchainChannel], peerUpdates, fastSync, + channels[bcv0.BlockchainChannel], peerUpdates, blockSync, metrics, ) if err != nil { @@ -371,11 +371,11 @@ func 
createBlockchainReactor( return reactorShim, reactor, nil - case cfg.BlockchainV2: - return nil, nil, errors.New("fastsync version v2 is no longer supported. Please use v0") + case cfg.BlockSyncV2: + return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0") default: - return nil, nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) + return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version) } } @@ -700,7 +700,7 @@ func createPEXReactorV2( logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, -) (*pex.ReactorV2, error) { +) (service.Service, error) { channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128) if err != nil { @@ -725,15 +725,15 @@ func makeNodeInfo( } var bcChannel byte - switch config.FastSync.Version { - case cfg.BlockchainV0: + switch config.BlockSync.Version { + case cfg.BlockSyncV0: bcChannel = byte(bcv0.BlockchainChannel) - case cfg.BlockchainV2: + case cfg.BlockSyncV2: bcChannel = bcv2.BlockchainChannel default: - return types.NodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) + return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version) } nodeInfo := types.NodeInfo{ diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 16ffc5c98..292e7a476 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -142,12 +142,7 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error // block until connected or timeout sl.Logger.Info("SignerListener: Blocking for connection") sl.triggerConnect() - err := sl.WaitConnection(sl.connectionAvailableCh, maxWait) - if err != nil { - return err - } - - return nil + return sl.WaitConnection(sl.connectionAvailableCh, maxWait) } func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index ed80ebef2..0a15eb366 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -197,17 +197,20 @@ message ResponseBeginBlock { } message ResponseCheckTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; - string sender = 9; - int64 priority = 10; - string mempool_error = 11; + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + string codespace = 8; + string sender = 9; + int64 priority = 10; + + // mempool_error is set by Tendermint. + // ABCI applictions creating a ResponseCheckTX should not set mempool_error. 
+ string mempool_error = 11; } message ResponseDeliverTx { diff --git a/proto/tendermint/blockchain/message.go b/proto/tendermint/blocksync/message.go similarity index 99% rename from proto/tendermint/blockchain/message.go rename to proto/tendermint/blocksync/message.go index 6143a64e7..d448ccc4b 100644 --- a/proto/tendermint/blockchain/message.go +++ b/proto/tendermint/blocksync/message.go @@ -1,4 +1,4 @@ -package blockchain +package blocksync import ( "errors" diff --git a/proto/tendermint/blockchain/message_test.go b/proto/tendermint/blocksync/message_test.go similarity index 99% rename from proto/tendermint/blockchain/message_test.go rename to proto/tendermint/blocksync/message_test.go index 37a0df217..dd1aebbd0 100644 --- a/proto/tendermint/blockchain/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -1,4 +1,4 @@ -package blockchain_test +package blocksync_test import ( "encoding/hex" @@ -8,7 +8,7 @@ import ( proto "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/types" ) diff --git a/proto/tendermint/blockchain/types.pb.go b/proto/tendermint/blocksync/types.pb.go similarity index 90% rename from proto/tendermint/blockchain/types.pb.go rename to proto/tendermint/blocksync/types.pb.go index bc160b230..fcbef7107 100644 --- a/proto/tendermint/blockchain/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/blockchain/types.proto +// source: tendermint/blocksync/types.proto -package blockchain +package blocksync import ( fmt "fmt" @@ -32,7 +32,7 @@ func (m *BlockRequest) Reset() { *m = BlockRequest{} } func (m *BlockRequest) String() string { return proto.CompactTextString(m) } func (*BlockRequest) ProtoMessage() {} func (*BlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{0} + return fileDescriptor_19b397c236e0fa07, []int{0} } func (m *BlockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} } func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) } func (*NoBlockResponse) ProtoMessage() {} func (*NoBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{1} + return fileDescriptor_19b397c236e0fa07, []int{1} } func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -122,7 +122,7 @@ func (m *BlockResponse) Reset() { *m = BlockResponse{} } func (m *BlockResponse) String() string { return proto.CompactTextString(m) } func (*BlockResponse) ProtoMessage() {} func (*BlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{2} + return fileDescriptor_19b397c236e0fa07, []int{2} } func (m *BlockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -166,7 +166,7 @@ func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (m *StatusRequest) String() string { return proto.CompactTextString(m) } func (*StatusRequest) ProtoMessage() {} func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{3} + return fileDescriptor_19b397c236e0fa07, []int{3} } func (m *StatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -205,7 +205,7 @@ func (m *StatusResponse) Reset() { *m = 
StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{4} + return fileDescriptor_19b397c236e0fa07, []int{4} } func (m *StatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -262,7 +262,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_2927480384e78499, []int{5} + return fileDescriptor_19b397c236e0fa07, []int{5} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -373,42 +373,41 @@ func (*Message) XXX_OneofWrappers() []interface{} { } func init() { - proto.RegisterType((*BlockRequest)(nil), "tendermint.blockchain.BlockRequest") - proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blockchain.NoBlockResponse") - proto.RegisterType((*BlockResponse)(nil), "tendermint.blockchain.BlockResponse") - proto.RegisterType((*StatusRequest)(nil), "tendermint.blockchain.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "tendermint.blockchain.StatusResponse") - proto.RegisterType((*Message)(nil), "tendermint.blockchain.Message") + proto.RegisterType((*BlockRequest)(nil), "tendermint.blocksync.BlockRequest") + proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blocksync.NoBlockResponse") + proto.RegisterType((*BlockResponse)(nil), "tendermint.blocksync.BlockResponse") + proto.RegisterType((*StatusRequest)(nil), "tendermint.blocksync.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "tendermint.blocksync.StatusResponse") + proto.RegisterType((*Message)(nil), "tendermint.blocksync.Message") } -func init() { proto.RegisterFile("tendermint/blockchain/types.proto", fileDescriptor_2927480384e78499) } +func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) } -var fileDescriptor_2927480384e78499 = []byte{ - // 370 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xc1, 0x4e, 0xfa, 0x40, - 0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x9b, 0xa8, 0xc4, 0x98, 0x46, 0xab, 0x12, - 0x3d, 0xd8, 0x26, 0x78, 0x25, 0x1e, 0x38, 0x11, 0x13, 0x8c, 0xa9, 0xc6, 0x83, 0x17, 0xd2, 0xe2, - 0x86, 0x36, 0x4a, 0x17, 0xd9, 0xed, 0xc1, 0xb7, 0xf0, 0x19, 0x7c, 0x1a, 0x8f, 0x1c, 0x3d, 0x1a, - 0x78, 0x11, 0xc3, 0x6c, 0x29, 0x4b, 0x03, 0xf5, 0xb6, 0x3b, 0xfd, 0xe6, 0x37, 0xdf, 0x7e, 0x99, - 0xc2, 0x31, 0x27, 0xf1, 0x33, 0x99, 0x8c, 0xa2, 0x98, 0xbb, 0xc1, 0x2b, 0x1d, 0xbc, 0x0c, 0x42, - 0x3f, 0x8a, 0x5d, 0xfe, 0x3e, 0x26, 0xcc, 0x19, 0x4f, 0x28, 0xa7, 0xe6, 0xee, 0x4a, 0xe2, 0xac, - 0x24, 0x07, 0x87, 0x52, 0x27, 0xca, 0x45, 0xbf, 0x68, 0xb2, 0x9b, 0x50, 0xeb, 0x2c, 0xae, 0x1e, - 0x79, 0x4b, 0x08, 0xe3, 0xe6, 0x1e, 0x54, 0x42, 0x12, 0x0d, 0x43, 0xde, 0x50, 0x8f, 0xd4, 0x73, - 0xcd, 0x4b, 0x6f, 0xf6, 0x05, 0x18, 0xb7, 0x34, 0x55, 0xb2, 0x31, 0x8d, 0x19, 0xd9, 0x2a, 0xbd, - 0x06, 0x7d, 0x5d, 0x78, 0x09, 0x65, 0x1c, 0x89, 0xba, 0x6a, 0x6b, 0xdf, 0x91, 0x8c, 0x8a, 0x07, - 0x08, 0xbd, 0x50, 0xd9, 0x06, 0xe8, 0xf7, 0xdc, 0xe7, 0x09, 0x4b, 0x3d, 0xd9, 0x6d, 0xa8, 0x2f, - 0x0b, 0xc5, 0xa3, 0x4d, 0x13, 0x4a, 0x81, 0xcf, 0x48, 0xe3, 0x1f, 0x56, 0xf1, 0x6c, 0x7f, 0x6a, - 0xf0, 0xbf, 0x47, 0x18, 0xf3, 0x87, 0xc4, 0xbc, 0x01, 0x1d, 0x67, 0xf4, 0x27, 0x02, 0x9d, 0x3a, - 0x3a, 0x71, 0x36, 0x46, 0xe7, 0xc8, 
0xc9, 0x74, 0x15, 0xaf, 0x16, 0xc8, 0x49, 0x3d, 0xc0, 0x4e, - 0x4c, 0xfb, 0x4b, 0x9c, 0x30, 0x86, 0x83, 0xab, 0xad, 0xe6, 0x16, 0x5e, 0x2e, 0xc1, 0xae, 0xe2, - 0x19, 0x71, 0x2e, 0xd4, 0x1e, 0xd4, 0x73, 0x48, 0x0d, 0x91, 0xa7, 0xc5, 0x16, 0x33, 0xa0, 0x1e, - 0xe4, 0x71, 0x0c, 0xa3, 0xcb, 0x5e, 0x5c, 0x2a, 0xc4, 0xad, 0x05, 0xbf, 0xc0, 0x31, 0xb9, 0x60, - 0xde, 0x81, 0x91, 0xe1, 0x52, 0x7b, 0x65, 0xe4, 0x9d, 0xfd, 0xc1, 0xcb, 0xfc, 0xd5, 0xd9, 0x5a, - 0xa5, 0x53, 0x06, 0x8d, 0x25, 0xa3, 0xce, 0xe3, 0xd7, 0xcc, 0x52, 0xa7, 0x33, 0x4b, 0xfd, 0x99, - 0x59, 0xea, 0xc7, 0xdc, 0x52, 0xa6, 0x73, 0x4b, 0xf9, 0x9e, 0x5b, 0xca, 0x53, 0x7b, 0x18, 0xf1, - 0x30, 0x09, 0x9c, 0x01, 0x1d, 0xb9, 0xf2, 0x26, 0xaf, 0x8e, 0xb8, 0xc8, 0xee, 0xc6, 0xff, 0x23, - 0xa8, 0xe0, 0xc7, 0xab, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x59, 0x07, 0xbd, 0x3f, 0x03, - 0x00, 0x00, +var fileDescriptor_19b397c236e0fa07 = []byte{ + // 368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40, + 0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97, + 0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14, + 0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3, + 0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64, + 0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7, + 0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a, + 0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec, + 0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19, + 0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06, + 0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b, + 0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a, + 0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0, + 0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72, + 0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78, + 0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56, + 0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3, + 0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f, + 0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd, + 0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8, + 0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4, + 0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12, + 0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00, } func (m *BlockRequest) Marshal() (dAtA []byte, err error) { diff --git a/proto/tendermint/blockchain/types.proto b/proto/tendermint/blocksync/types.proto similarity index 94% rename from proto/tendermint/blockchain/types.proto rename to proto/tendermint/blocksync/types.proto index f5c143cf5..8c187c793 100644 --- 
a/proto/tendermint/blockchain/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -1,7 +1,7 @@ syntax = "proto3"; -package tendermint.blockchain; +package tendermint.blocksync; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/blockchain"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; import "tendermint/types/block.proto"; diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 414ad1864..d8461484c 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/abci/types" ) -//go:generate mockery --case underscore --name AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot +//go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level diff --git a/proxy/client.go b/proxy/client.go index 928af3cc3..929933e01 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -10,6 +10,8 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) +//go:generate ../scripts/mockery_generate.sh ClientCreator + // ClientCreator creates new ABCI clients. type ClientCreator interface { // NewABCIClient returns a new ABCI client. diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index ac5868e79..c862f7021 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index ca6b8406f..2505160d6 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 00009993d..47ac5bef9 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index adc655558..0b6f10ce1 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go index 499313d17..0e4157c2f 100644 --- a/proxy/mocks/client_creator.go +++ b/proxy/mocks/client_creator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.1.1. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
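Note on the mock-generation change above: every `//go:generate mockery ...` directive in this patch is replaced by a call to a shared wrapper (added later in this patch as `scripts/mockery_generate.sh`), so all packages regenerate mocks with identical flags; its `--disable-version-string` flag is also why the generated-file headers lose their version numbers. A minimal sketch of the pattern a package would follow, using a hypothetical `Doer` interface purely for illustration (the relative script path depends on the package's depth in the tree):

```go
// Illustrative sketch only; "Doer" and the package name are not part of this patch.
package example

//go:generate ../scripts/mockery_generate.sh Doer

// Doer is a hypothetical interface. Running `go generate ./...` (or the
// Makefile's mockery target) invokes the shared wrapper, which runs mockery v2
// with consistent flags and writes an auto-generated mock into this package's
// mocks/ directory.
type Doer interface {
	Do(input string) (string, error)
}
```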
package mocks diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 944766152..0ff158e56 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -116,12 +116,6 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { defer cancel() n, config := NodeSuite(t) - - // previous versions of this test used a shared fixture with - // other tests, and in this version we give it a little time - // for the node to make progress before running the test - time.Sleep(100 * time.Millisecond) - chainID := config.ChainID() pv, err := privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) @@ -131,6 +125,9 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { correct, fakes := makeEvidences(t, pv, chainID) t.Logf("client %d", i) + // make sure that the node has produced enough blocks + waitForBlock(ctx, t, c, 2) + result, err := c.BroadcastEvidence(ctx, correct) require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") @@ -171,3 +168,21 @@ func TestBroadcastEmptyEvidence(t *testing.T) { assert.Error(t, err) } } + +func waitForBlock(ctx context.Context, t *testing.T, c client.Client, height int64) { + timer := time.NewTimer(0 * time.Millisecond) + defer timer.Stop() + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + status, err := c.Status(ctx) + require.NoError(t, err) + if status.SyncInfo.LatestBlockHeight >= height { + return + } + timer.Reset(200 * time.Millisecond) + } + } +} diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 268b711e7..3547b42ae 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -29,7 +29,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name Client +//go:generate ../../scripts/mockery_generate.sh Client // Client wraps most important rpc calls a client would make if you want to // listen for events, test if it also implements events.EventSwitch. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 0663ebf67..d752e6a93 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -2,6 +2,7 @@ package local import ( "context" + "errors" "fmt" "time" @@ -46,15 +47,15 @@ type Local struct { // NodeService describes the portion of the node interface that the // local RPC client constructor needs to build a local client. type NodeService interface { - ConfigureRPC() (*rpccore.Environment, error) + RPCEnvironment() *rpccore.Environment EventBus() *types.EventBus } // New configures a client that calls the Node directly. func New(node NodeService) (*Local, error) { - env, err := node.ConfigureRPC() - if err != nil { - return nil, err + env := node.RPCEnvironment() + if env == nil { + return nil, errors.New("rpc is nil") } return &Local{ EventBus: node.EventBus(), diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index adf1934cd..ef374b9a8 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
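The `local` client change above shifts responsibility for RPC setup onto the node: the constructor now expects an already-built `*rpccore.Environment` via `RPCEnvironment()` and fails fast when it is nil. A minimal sketch of the new `NodeService` contract, with a placeholder stub type that is illustrative only:

```go
package example

import (
	"github.com/tendermint/tendermint/rpc/client/local"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	"github.com/tendermint/tendermint/types"
)

// stubNode is a hypothetical implementer of local.NodeService.
type stubNode struct {
	env *rpccore.Environment
	bus *types.EventBus
}

func (s *stubNode) RPCEnvironment() *rpccore.Environment { return s.env }
func (s *stubNode) EventBus() *types.EventBus            { return s.bus }

// newLocalClient wires a Local client. local.New now returns an error
// ("rpc is nil") when the environment has not been initialized, instead of
// configuring RPC itself as the old ConfigureRPC-based contract did.
func newLocalClient(s *stubNode) (*local.Local, error) {
	return local.New(s)
}
```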
package mocks diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index d57f76a99..081276d0f 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -135,12 +135,19 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // If the next block has not been committed yet, // use a non-canonical commit if height == env.BlockStore.Height() { - commit := env.BlockStore.LoadSeenCommit(height) - return ctypes.NewResultCommit(&header, commit, false), nil + commit := env.BlockStore.LoadSeenCommit() + // NOTE: we can't yet ensure atomicity of operations in asserting + // whether this is the latest height and retrieving the seen commit + if commit != nil && commit.Height == height { + return ctypes.NewResultCommit(&header, commit, false), nil + } } // Return the canonical commit (comes from the block at height+1) commit := env.BlockStore.LoadBlockCommit(height) + if commit == nil { + return nil, nil + } return ctypes.NewResultCommit(&header, commit, true), nil } diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 70bd4c723..29db2f094 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -129,7 +129,7 @@ func (mockBlockStore) LoadBlock(height int64) *types.Block { retur func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } -func (mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return nil } +func (mockBlockStore) LoadSeenCommit() *types.Commit { return nil } func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } diff --git a/rpc/core/env.go b/rpc/core/env.go index c18e84b51..eb7232c01 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -81,7 +81,7 @@ type Environment struct { ConsensusReactor *consensus.Reactor EventBus *types.EventBus // thread safe Mempool mempl.Mempool - FastSyncReactor consensus.FastSyncReactor + BlockSyncReactor consensus.BlockSyncReactor Logger log.Logger diff --git a/rpc/core/net.go b/rpc/core/net.go index edcf8fffa..8f3e89d77 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -36,6 +36,10 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). 
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { + if env.P2PPeers == nil { + return nil, errors.New("peer management system does not support this operation") + } + if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest) } @@ -53,6 +57,10 @@ func (env *Environment) UnsafeDialPeers( peers []string, persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { + if env.P2PPeers == nil { + return nil, errors.New("peer management system does not support this operation") + } + if len(peers) == 0 { return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest) } diff --git a/rpc/core/status.go b/rpc/core/status.go index 9febef195..815ab37f5 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -69,10 +69,10 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err EarliestAppHash: earliestAppHash, EarliestBlockHeight: earliestBlockHeight, EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - MaxPeerBlockHeight: env.FastSyncReactor.GetMaxPeerBlockHeight(), + MaxPeerBlockHeight: env.BlockSyncReactor.GetMaxPeerBlockHeight(), CatchingUp: env.ConsensusReactor.WaitSync(), - TotalSyncedTime: env.FastSyncReactor.GetTotalSyncedTime(), - RemainingTime: env.FastSyncReactor.GetRemainingSyncTime(), + TotalSyncedTime: env.BlockSyncReactor.GetTotalSyncedTime(), + RemainingTime: env.BlockSyncReactor.GetRemainingSyncTime(), }, ValidatorInfo: validatorInfo, } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index a2cbd62da..bfd49b461 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -806,6 +806,7 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /genesis: get: summary: Get Genesis @@ -813,7 +814,7 @@ paths: tags: - Info description: | - Get genesis. + Get the genesis document. responses: "200": description: Genesis results. @@ -827,6 +828,39 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + + /genesis_chunked: + get: + summary: Get Genesis in paginated chunks + operationId: genesis_chunked + tags: + - Info + description: | + Get genesis document in a paginated/chunked format to make it + easier to iterate through larger genesis structures. + parameters: + - in: query + name: chunkID + description: Sequence number of the chunk to download. + schema: + type: integer + default: 0 + example: 1 + responses: + "200": + description: Genesis results. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/GenesisChunkedResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + + /dump_consensus_state: get: summary: Get consensus state @@ -1894,6 +1928,35 @@ components: properties: {} type: object + GenesisChunkedResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: string + example: "2.0" + id: + type: integer + example: 0 + result: + required: + - "chunk" + - "total" + - "data" + properties: + chunk: + type: integer + example: 0 + total: + type: integer + example: 1 + data: + type: string + example: "Z2VuZXNpcwo=" + DumpConsensusResponse: type: object required: diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh new file mode 100755 index 000000000..382c277bb --- /dev/null +++ b/scripts/mockery_generate.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +go run github.com/vektra/mockery/v2 --disable-version-string --case underscore --name $* diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 1787be9ef..bc90eadf5 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -186,6 +186,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, // fetch matching heights results = make([]int64, 0, len(filteredHeights)) +heights: for _, hBz := range filteredHeights { h := int64FromBytes(hBz) @@ -199,7 +200,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, select { case <-ctx.Done(): - break + break heights default: } @@ -240,7 +241,7 @@ func (idx *BlockerIndexer) matchRange( } defer it.Close() -LOOP: +iter: for ; it.Valid(); it.Next() { var ( eventValue string @@ -260,7 +261,7 @@ LOOP: if _, ok := qr.AnyBound().(int64); ok { v, err := strconv.ParseInt(eventValue, 10, 64) if err != nil { - continue LOOP + continue iter } include := true @@ -279,7 +280,7 @@ LOOP: select { case <-ctx.Done(): - break + break iter default: } @@ -372,12 +373,13 @@ func (idx *BlockerIndexer) match( } defer it.Close() + iterExists: for ; it.Valid(); it.Next() { tmpHeights[string(it.Value())] = it.Value() select { case <-ctx.Done(): - break + break iterExists default: } @@ -399,6 +401,7 @@ func (idx *BlockerIndexer) match( } defer it.Close() + iterContains: for ; it.Valid(); it.Next() { eventValue, err := parseValueFromEventKey(it.Key()) if err != nil { @@ -411,7 +414,7 @@ func (idx *BlockerIndexer) match( select { case <-ctx.Done(): - break + break iterContains default: } diff --git a/state/indexer/eventsink.go b/state/indexer/eventsink.go index 8c2529103..d7c9d10e0 100644 --- a/state/indexer/eventsink.go +++ b/state/indexer/eventsink.go @@ -16,6 +16,8 @@ const ( PSQL EventSinkType = "psql" ) +//go:generate ../../scripts/mockery_generate.sh EventSink + // EventSink interface is defined the APIs for the IndexerService to interact with the data store, // including the block/transaction indexing and the search functions. // diff --git a/state/indexer/mocks/event_sink.go b/state/indexer/mocks/event_sink.go new file mode 100644 index 000000000..ce5b8ace5 --- /dev/null +++ b/state/indexer/mocks/event_sink.go @@ -0,0 +1,167 @@ +// Code generated by mockery. DO NOT EDIT. 
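The `genesis_chunked` endpoint documented above returns the genesis document as base64 chunks with `chunk`, `total`, and `data` fields. A minimal sketch of fetching and reassembling the chunks, assuming the URI form of the endpoint and the `GenesisChunkedResponse` schema exactly as written above (the node address and helper names are placeholders):

```go
package example

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// genesisChunkedResponse mirrors the GenesisChunkedResponse schema above.
type genesisChunkedResponse struct {
	Result struct {
		Chunk int    `json:"chunk"`
		Total int    `json:"total"`
		Data  string `json:"data"` // base64-encoded slice of the genesis document
	} `json:"result"`
}

// fetchGenesis downloads every chunk in sequence and concatenates the decoded
// payloads; base would be something like "http://localhost:26657".
func fetchGenesis(base string) ([]byte, error) {
	var genesis []byte
	for id := 0; ; id++ {
		resp, err := http.Get(fmt.Sprintf("%s/genesis_chunked?chunkID=%d", base, id))
		if err != nil {
			return nil, err
		}
		var gc genesisChunkedResponse
		err = json.NewDecoder(resp.Body).Decode(&gc)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		raw, err := base64.StdEncoding.DecodeString(gc.Result.Data)
		if err != nil {
			return nil, err
		}
		genesis = append(genesis, raw...)
		if id+1 >= gc.Result.Total {
			return genesis, nil
		}
	}
}
```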
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/state/indexer" + + query "github.com/tendermint/tendermint/libs/pubsub/query" + + tenderminttypes "github.com/tendermint/tendermint/types" + + types "github.com/tendermint/tendermint/abci/types" +) + +// EventSink is an autogenerated mock type for the EventSink type +type EventSink struct { + mock.Mock +} + +// GetTxByHash provides a mock function with given fields: _a0 +func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { + ret := _m.Called(_a0) + + var r0 *types.TxResult + if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.TxResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasBlock provides a mock function with given fields: _a0 +func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { + ret := _m.Called(_a0) + + var r0 bool + if rf, ok := ret.Get(0).(func(int64) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexBlockEvents provides a mock function with given fields: _a0 +func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeader) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(tenderminttypes.EventDataNewBlockHeader) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// IndexTxEvents provides a mock function with given fields: _a0 +func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SearchBlockEvents provides a mock function with given fields: _a0, _a1 +func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([]int64, error) { + ret := _m.Called(_a0, _a1) + + var r0 []int64 + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []int64); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SearchTxEvents provides a mock function with given fields: _a0, _a1 +func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*types.TxResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 []*types.TxResult + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.TxResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Stop provides a mock function with given fields: +func (_m *EventSink) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Type provides a mock function with given fields: +func (_m *EventSink) Type() indexer.EventSinkType { + ret := _m.Called() + + var r0 
indexer.EventSinkType + if rf, ok := ret.Get(0).(func() indexer.EventSinkType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(indexer.EventSinkType) + } + + return r0 +} diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index ee1af5a5f..0df773a53 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -255,12 +255,7 @@ func verifyTimeStamp(tb string) error { if rows.Next() { var ts string - err = rows.Scan(&ts) - if err != nil { - return err - } - - return nil + return rows.Scan(&ts) } return errors.New("no result") diff --git a/state/indexer/tx/kv/kv.go b/state/indexer/tx/kv/kv.go index 5d310eea7..080dbce2c 100644 --- a/state/indexer/tx/kv/kv.go +++ b/state/indexer/tx/kv/kv.go @@ -219,6 +219,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } results := make([]*abci.TxResult, 0, len(filteredHashes)) +hashes: for _, h := range filteredHashes { res, err := txi.Get(h) if err != nil { @@ -229,7 +230,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul // Potentially exit early. select { case <-ctx.Done(): - break + break hashes default: } } @@ -285,13 +286,14 @@ func (txi *TxIndex) match( } defer it.Close() + iterEqual: for ; it.Valid(); it.Next() { tmpHashes[string(it.Value())] = it.Value() // Potentially exit early. select { case <-ctx.Done(): - break + break iterEqual default: } } @@ -308,13 +310,14 @@ func (txi *TxIndex) match( } defer it.Close() + iterExists: for ; it.Valid(); it.Next() { tmpHashes[string(it.Value())] = it.Value() // Potentially exit early. select { case <-ctx.Done(): - break + break iterExists default: } } @@ -332,6 +335,7 @@ func (txi *TxIndex) match( } defer it.Close() + iterContains: for ; it.Valid(); it.Next() { value, err := parseValueFromKey(it.Key()) if err != nil { @@ -344,7 +348,7 @@ func (txi *TxIndex) match( // Potentially exit early. select { case <-ctx.Done(): - break + break iterContains default: } } @@ -412,7 +416,7 @@ func (txi *TxIndex) matchRange( } defer it.Close() -LOOP: +iter: for ; it.Valid(); it.Next() { value, err := parseValueFromKey(it.Key()) if err != nil { @@ -421,7 +425,7 @@ LOOP: if _, ok := qr.AnyBound().(int64); ok { v, err := strconv.ParseInt(value, 10, 64) if err != nil { - continue LOOP + continue iter } include := true @@ -448,7 +452,7 @@ LOOP: // Potentially exit early. select { case <-ctx.Done(): - break + break iter default: } } diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go new file mode 100644 index 000000000..e66aad071 --- /dev/null +++ b/state/mocks/block_store.go @@ -0,0 +1,194 @@ +// Code generated by mockery. DO NOT EDIT. 
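The labelled `break` statements introduced in the block and transaction indexers above fix a subtle Go pitfall: inside a `select`, a bare `break` only leaves the `select`, so a cancelled context never actually stopped the surrounding iteration. A standalone sketch of the difference (names illustrative):

```go
package example

import "context"

// scan walks keys until ctx is cancelled. Without the "iter" label, the break
// inside the select would only exit the select statement and the loop would
// keep running; with the label it terminates the whole loop, which is the
// behavior the indexer hunks above restore.
func scan(ctx context.Context, keys []string) []string {
	var out []string

iter:
	for _, k := range keys {
		select {
		case <-ctx.Done():
			break iter
		default:
		}
		out = append(out, k)
	}
	return out
}
```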
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/types" +) + +// BlockStore is an autogenerated mock type for the BlockStore type +type BlockStore struct { + mock.Mock +} + +// Base provides a mock function with given fields: +func (_m *BlockStore) Base() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// Height provides a mock function with given fields: +func (_m *BlockStore) Height() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// LoadBaseMeta provides a mock function with given fields: +func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { + ret := _m.Called() + + var r0 *types.BlockMeta + if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockMeta) + } + } + + return r0 +} + +// LoadBlock provides a mock function with given fields: height +func (_m *BlockStore) LoadBlock(height int64) *types.Block { + ret := _m.Called(height) + + var r0 *types.Block + if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + return r0 +} + +// LoadBlockByHash provides a mock function with given fields: hash +func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { + ret := _m.Called(hash) + + var r0 *types.Block + if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + return r0 +} + +// LoadBlockCommit provides a mock function with given fields: height +func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { + ret := _m.Called(height) + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + return r0 +} + +// LoadBlockMeta provides a mock function with given fields: height +func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + ret := _m.Called(height) + + var r0 *types.BlockMeta + if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockMeta) + } + } + + return r0 +} + +// LoadBlockPart provides a mock function with given fields: height, index +func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + ret := _m.Called(height, index) + + var r0 *types.Part + if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { + r0 = rf(height, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Part) + } + } + + return r0 +} + +// LoadSeenCommit provides a mock function with given fields: +func (_m *BlockStore) LoadSeenCommit() *types.Commit { + ret := _m.Called() + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func() *types.Commit); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + return r0 +} + +// PruneBlocks provides a mock function with given fields: height +func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { + ret := _m.Called(height) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(int64) uint64); ok { + r0 = rf(height) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 
error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveBlock provides a mock function with given fields: block, blockParts, seenCommit +func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + _m.Called(block, blockParts, seenCommit) +} + +// Size provides a mock function with given fields: +func (_m *BlockStore) Size() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} diff --git a/state/mocks/event_sink.go b/state/mocks/event_sink.go new file mode 100644 index 000000000..749515ccf --- /dev/null +++ b/state/mocks/event_sink.go @@ -0,0 +1,167 @@ +// Code generated by mockery 2.7.5. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/state/indexer" + + query "github.com/tendermint/tendermint/libs/pubsub/query" + + tenderminttypes "github.com/tendermint/tendermint/types" + + types "github.com/tendermint/tendermint/abci/types" +) + +// EventSink is an autogenerated mock type for the EventSink type +type EventSink struct { + mock.Mock +} + +// GetTxByHash provides a mock function with given fields: _a0 +func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { + ret := _m.Called(_a0) + + var r0 *types.TxResult + if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.TxResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasBlock provides a mock function with given fields: _a0 +func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { + ret := _m.Called(_a0) + + var r0 bool + if rf, ok := ret.Get(0).(func(int64) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexBlockEvents provides a mock function with given fields: _a0 +func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeader) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(tenderminttypes.EventDataNewBlockHeader) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// IndexTxEvents provides a mock function with given fields: _a0 +func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SearchBlockEvents provides a mock function with given fields: _a0, _a1 +func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([]int64, error) { + ret := _m.Called(_a0, _a1) + + var r0 []int64 + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []int64); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SearchTxEvents provides a mock function with given fields: _a0, _a1 +func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 
*query.Query) ([]*types.TxResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 []*types.TxResult + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.TxResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Stop provides a mock function with given fields: +func (_m *EventSink) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Type provides a mock function with given fields: +func (_m *EventSink) Type() indexer.EventSinkType { + ret := _m.Called() + + var r0 indexer.EventSinkType + if rf, ok := ret.Get(0).(func() indexer.EventSinkType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(indexer.EventSinkType) + } + + return r0 +} diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index 8dd6a68d4..bb33547d2 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/state/mocks/store.go b/state/mocks/store.go index 52de54472..750bf7f29 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -1,4 +1,4 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/state/services.go b/state/services.go index a46863904..c692d0b94 100644 --- a/state/services.go +++ b/state/services.go @@ -9,6 +9,8 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! //------------------------------------------------------ +//go:generate ../scripts/mockery_generate.sh BlockStore + //------------------------------------------------------ // blockstore @@ -30,13 +32,13 @@ type BlockStore interface { LoadBlockPart(height int64, index int) *types.Part LoadBlockCommit(height int64) *types.Commit - LoadSeenCommit(height int64) *types.Commit + LoadSeenCommit() *types.Commit } //----------------------------------------------------------------------------- // evidence pool -//go:generate mockery --case underscore --name EvidencePool +//go:generate ../scripts/mockery_generate.sh EvidencePool // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { diff --git a/state/store.go b/state/store.go index 84b19a685..5ce11e47d 100644 --- a/state/store.go +++ b/state/store.go @@ -68,7 +68,7 @@ func init() { //---------------------- -//go:generate mockery --case underscore --name Store +//go:generate ../scripts/mockery_generate.sh Store // Store defines the state store interface // @@ -661,10 +661,5 @@ func (store dbStore) saveConsensusParamsInfo( return err } - err = batch.Set(consensusParamsKey(nextHeight), bz) - if err != nil { - return err - } - - return nil + return batch.Set(consensusParamsKey(nextHeight), bz) } diff --git a/store/store.go b/store/store.go index 1396ca777..8848b76d9 100644 --- a/store/store.go +++ b/store/store.go @@ -258,12 +258,13 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { return commit } -// LoadSeenCommit returns the locally seen Commit for the given height. -// This is useful when we've seen a commit, but there has not yet been -// a new block at `height + 1` that includes this commit in its block.LastCommit. 
-func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { +// LoadSeenCommit returns the last locally seen Commit before being +// cannonicalized. This is useful when we've seen a commit, but there +// has not yet been a new block at `height + 1` that includes this +// commit in its block.LastCommit. +func (bs *BlockStore) LoadSeenCommit() *types.Commit { var pbc = new(tmproto.Commit) - bz, err := bs.db.Get(seenCommitKey(height)) + bz, err := bs.db.Get(seenCommitKey()) if err != nil { panic(err) } @@ -329,10 +330,6 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { return pruned, err } - if _, err := bs.pruneRange(seenCommitKey(0), seenCommitKey(height), nil); err != nil { - return pruned, err - } - return pruned, nil } @@ -479,13 +476,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s // Save seen commit (seen +2/3 precommits for block) pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) - if err := batch.Set(seenCommitKey(height), seenCommitBytes); err != nil { - panic(err) - } - - // remove the previous seen commit that we have just replaced with the - // canonical commit - if err := batch.Delete(seenCommitKey(height - 1)); err != nil { + if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil { panic(err) } @@ -516,7 +507,7 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err if err != nil { return fmt.Errorf("unable to marshal commit: %w", err) } - return bs.db.Set(seenCommitKey(height), seenCommitBytes) + return bs.db.Set(seenCommitKey(), seenCommitBytes) } func (bs *BlockStore) SaveSignedHeader(sh *types.SignedHeader, blockID types.BlockID) error { @@ -612,8 +603,8 @@ func blockCommitKey(height int64) []byte { return key } -func seenCommitKey(height int64) []byte { - key, err := orderedcode.Append(nil, prefixSeenCommit, height) +func seenCommitKey() []byte { + key, err := orderedcode.Append(nil, prefixSeenCommit) if err != nil { panic(err) } diff --git a/store/store_test.go b/store/store_test.go index d07e001e6..2132d9aff 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -234,14 +234,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { bBlockMeta := bs.LoadBlockMeta(tuple.block.Height) if tuple.eraseSeenCommitInDB { - err := db.Delete(seenCommitKey(tuple.block.Height)) + err := db.Delete(seenCommitKey()) require.NoError(t, err) } if tuple.corruptSeenCommitInDB { - err := db.Set(seenCommitKey(tuple.block.Height), []byte("bogus-seen-commit")) + err := db.Set(seenCommitKey(), []byte("bogus-seen-commit")) require.NoError(t, err) } - bSeenCommit := bs.LoadSeenCommit(tuple.block.Height) + bSeenCommit := bs.LoadSeenCommit() commitHeight := tuple.block.Height - 1 if tuple.eraseCommitInDB { @@ -494,9 +494,8 @@ func TestBlockFetchAtHeight(t *testing.T) { func TestSeenAndCanonicalCommit(t *testing.T) { bs, _ := freshBlockStore() - height := int64(2) loadCommit := func() (interface{}, error) { - meta := bs.LoadSeenCommit(height) + meta := bs.LoadSeenCommit() return meta, nil } @@ -509,20 +508,15 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // produce a few blocks and check that the correct seen and cannoncial commits // are persisted. 
for h := int64(3); h <= 5; h++ { - c1 := bs.LoadSeenCommit(h) - require.Nil(t, c1) - c2 := bs.LoadBlockCommit(h - 1) - require.Nil(t, c2) blockCommit := makeTestCommit(h-1, tmtime.Now()) block := factory.MakeBlock(state, h, blockCommit) partSet := block.MakePartSet(2) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) - c3 := bs.LoadSeenCommit(h) + c3 := bs.LoadSeenCommit() + require.NotNil(t, c3) + require.Equal(t, h, c3.Height) require.Equal(t, seenCommit.Hash(), c3.Hash()) - // the previous seen commit should be removed - c4 := bs.LoadSeenCommit(h - 1) - require.Nil(t, c4) c5 := bs.LoadBlockCommit(h) require.Nil(t, c5) c6 := bs.LoadBlockCommit(h - 1) diff --git a/test/e2e/README.md b/test/e2e/README.md index 6811b6561..d737120c1 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -23,7 +23,7 @@ The above should hold for any arbitrary, valid network configuration, and that c A testnet configuration is specified as a TOML testnet manifest (see below). The testnet runner uses the manifest to configure a set of Docker containers and start them in some order. The manifests can be written manually (to test specific configurations) or generated randomly by the testnet generator (to test a wide range of configuration permutations). -When running a testnet, the runner will first start the Docker nodes in some sequence, submit random transactions, and wait for the nodes to come online and the first blocks to be produced. This may involve e.g. waiting for nodes to fast sync and/or state sync. If specified, it will then run any misbehaviors (e.g. double-signing) and perturbations (e.g. killing or disconnecting nodes). It then waits for the testnet to stabilize, with all nodes online and having reached the latest height. +When running a testnet, the runner will first start the Docker nodes in some sequence, submit random transactions, and wait for the nodes to come online and the first blocks to be produced. This may involve e.g. waiting for nodes to block sync and/or state sync. If specified, it will then run any misbehaviors (e.g. double-signing) and perturbations (e.g. killing or disconnecting nodes). It then waits for the testnet to stabilize, with all nodes online and having reached the latest height. Once the testnet stabilizes, a set of Go end-to-end tests are run against the live testnet to verify network invariants (for example that blocks are identical across nodes). These use the RPC client to interact with the network, and should consider the entire network as a black box (i.e. it should not test any network or node internals, only externally visible behavior via RPC). The tests may use the `testNode()` helper to run parallel tests against each individual testnet node, and/or inspect the full blockchain history via `fetchBlockChain()`. 
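As the README text above says, the end-to-end suite treats the network as a black box and checks invariants purely over RPC. A minimal sketch of one such check, assuming one `rpc/client.Client` per node (the helper and package names here are illustrative, not the suite's actual helpers):

```go
package e2e_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/rpc/client"
)

// requireSameBlockHash asserts that every node reports the same block hash at
// the given height, i.e. the chains have not diverged.
func requireSameBlockHash(ctx context.Context, t *testing.T, clients []client.Client, height int64) {
	var want string
	for i, c := range clients {
		block, err := c.Block(ctx, &height)
		require.NoError(t, err)
		hash := block.BlockID.Hash.String()
		if i == 0 {
			want = hash
			continue
		}
		require.Equal(t, want, hash, "node %d disagrees at height %d", i, height)
	}
}
```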
diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 1e134cf71..f699b1162 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -30,7 +30,7 @@ var ( nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "grpc"} nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp", "grpc"} // FIXME: v2 disabled due to flake - nodeFastSyncs = uniformChoice{"v0"} // "v2" + nodeBlockSyncs = uniformChoice{"v0"} // "v2" nodeMempools = uniformChoice{"v0", "v1"} nodeStateSyncs = uniformChoice{false, true} nodePersistIntervals = uniformChoice{0, 1, 5} @@ -273,7 +273,7 @@ func generateNode( Database: nodeDatabases.Choose(r).(string), ABCIProtocol: nodeABCIProtocols.Choose(r).(string), PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), - FastSync: nodeFastSyncs.Choose(r).(string), + BlockSync: nodeBlockSyncs.Choose(r).(string), Mempool: nodeMempools.Choose(r).(string), StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), @@ -311,7 +311,7 @@ func generateNode( } if node.StateSync { - node.FastSync = "v0" + node.BlockSync = "v0" } return &node diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index afce3a2db..00c73ccbd 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -30,11 +30,6 @@ validator05 = 50 [node.seed01] mode = "seed" perturb = ["restart"] -seeds = ["seed02"] - -[node.seed02] -mode = "seed" -seeds = ["seed01"] [node.validator01] perturb = ["disconnect"] @@ -47,7 +42,7 @@ database = "boltdb" persist_interval = 0 perturb = ["restart"] privval_protocol = "tcp" -seeds = ["seed02"] +seeds = ["seed01"] [node.validator03] database = "badgerdb" @@ -66,29 +61,21 @@ perturb = ["pause"] [node.validator05] database = "cleveldb" -fast_sync = "v0" -seeds = ["seed02"] +block_sync = "v0" +seeds = ["seed01"] start_at = 1005 # Becomes part of the validator set at 1010 abci_protocol = "grpc" -perturb = ["kill", "pause", "disconnect", "restart"] +perturb = ["pause", "disconnect", "restart"] privval_protocol = "tcp" [node.full01] mode = "full" start_at = 1010 # FIXME: should be v2, disabled due to flake -fast_sync = "v0" +block_sync = "v0" persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] perturb = ["restart"] retain_blocks = 7 - -[node.full02] -mode = "full" -start_at = 1015 -# FIXME: should be v2, disabled due to flake -fast_sync = "v0" -perturb = ["restart"] -seeds = ["seed01"] state_sync = true [node.light01] diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 89b38e02a..5711be37d 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -106,9 +106,9 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // FastSync specifies the fast sync mode: "" (disable), "v0" or "v2". + // BlockSync specifies the block sync mode: "" (disable), "v0" or "v2". // Defaults to disabled. - FastSync string `toml:"fast_sync"` + BlockSync string `toml:"block_sync"` // Mempool specifies which version of mempool to use. 
Either "v0" or "v1" Mempool string `toml:"mempool_version"` diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index d54df4406..cfeb54bde 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -79,7 +79,7 @@ type Node struct { IP net.IP ProxyPort uint32 StartAt int64 - FastSync string + BlockSync string Mempool string StateSync bool Database string @@ -168,7 +168,7 @@ func LoadTestnet(file string) (*Testnet, error) { ABCIProtocol: ProtocolBuiltin, PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - FastSync: nodeManifest.FastSync, + BlockSync: nodeManifest.BlockSync, Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -328,10 +328,10 @@ func (n Node) Validate(testnet Testnet) error { } } } - switch n.FastSync { + switch n.BlockSync { case "", "v0", "v2": default: - return fmt.Errorf("invalid fast sync setting %q", n.FastSync) + return fmt.Errorf("invalid block sync setting %q", n.BlockSync) } switch n.Mempool { case "", "v0", "v1": diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index d99ca54cf..d17c75075 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -15,11 +15,7 @@ func Cleanup(testnet *e2e.Testnet) error { if err != nil { return err } - err = cleanupDir(testnet.Dir) - if err != nil { - return err - } - return nil + return cleanupDir(testnet.Dir) } // cleanupDocker removes all E2E resources (with label e2e=True), regardless @@ -37,13 +33,8 @@ func cleanupDocker() error { return err } - err = exec("bash", "-c", fmt.Sprintf( + return exec("bash", "-c", fmt.Sprintf( "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) - if err != nil { - return err - } - - return nil } // cleanupDir cleans up a testnet directory @@ -74,10 +65,5 @@ func cleanupDir(dir string) error { return err } - err = os.RemoveAll(dir) - if err != nil { - return err - } - - return nil + return os.RemoveAll(dir) } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index c80d05b22..c968ef306 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -296,10 +296,10 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mempool.Version = node.Mempool } - if node.FastSync == "" { + if node.BlockSync == "" { cfg.FastSyncMode = false } else { - cfg.FastSync.Version = node.FastSync + cfg.BlockSync.Version = node.BlockSync } if node.StateSync { diff --git a/test/e2e/runner/wait.go b/test/e2e/runner/wait.go index 4c16fb808..9f3a4c438 100644 --- a/test/e2e/runner/wait.go +++ b/test/e2e/runner/wait.go @@ -21,10 +21,7 @@ func Wait(testnet *e2e.Testnet, blocks int64) error { func WaitUntil(testnet *e2e.Testnet, height int64) error { logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height)) _, err := waitForAllNodes(testnet, height, waitingTime(len(testnet.Nodes))) - if err != nil { - return err - } - return nil + return err } // waitingTime estimates how long it should take for a node to reach the height. 
diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile index 96b332dcf..3d34e0a43 100644 --- a/test/fuzz/Makefile +++ b/test/fuzz/Makefile @@ -1,8 +1,15 @@ #!/usr/bin/make -f -.PHONY: fuzz-mempool -fuzz-mempool: - cd mempool && \ +.PHONY: fuzz-mempool-v1 +fuzz-mempool-v1: + cd mempool/v1 && \ + rm -f *-fuzz.zip && \ + go-fuzz-build && \ + go-fuzz + +.PHONY: fuzz-mempool-v0 +fuzz-mempool-v0: + cd mempool/v0 && \ rm -f *-fuzz.zip && \ go-fuzz-build && \ go-fuzz @@ -37,3 +44,9 @@ fuzz-rpc-server: rm -f *-fuzz.zip && \ go-fuzz-build && \ go-fuzz + +clean: + find . -name corpus -type d -exec rm -rf {} +; + find . -name crashers -type d -exec rm -rf {} +; + find . -name suppressions -type d -exec rm -rf {} +; + find . -name *\.zip -type f -delete diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/v0/checktx.go similarity index 97% rename from test/fuzz/mempool/checktx.go rename to test/fuzz/mempool/v0/checktx.go index 197b0daaf..a90ec2290 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/v0/checktx.go @@ -1,4 +1,4 @@ -package checktx +package v0 import ( "context" diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/v0/fuzz_test.go new file mode 100644 index 000000000..4f8f1e9c8 --- /dev/null +++ b/test/fuzz/mempool/v0/fuzz_test.go @@ -0,0 +1,33 @@ +package v0_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" +) + +const testdataCasesDir = "testdata/cases" + +func TestMempoolTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + mempoolv0.Fuzz(input) + }) + } +} diff --git a/test/fuzz/mempool/v0/testdata/cases/empty b/test/fuzz/mempool/v0/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go new file mode 100644 index 000000000..6194f3bcb --- /dev/null +++ b/test/fuzz/mempool/v1/checktx.go @@ -0,0 +1,37 @@ +package v1 + +import ( + "context" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/mempool" + mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/proxy" +) + +var mp mempool.Mempool + +func init() { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) + appConnMem, _ := cc.NewABCIClient() + err := appConnMem.Start() + if err != nil { + panic(err) + } + + cfg := config.DefaultMempoolConfig() + cfg.Broadcast = false + + mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0) +} + +func Fuzz(data []byte) int { + err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) + if err != nil { + return 0 + } + + return 1 +} diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go new file mode 100644 index 000000000..863697a0a --- /dev/null +++ b/test/fuzz/mempool/v1/fuzz_test.go @@ -0,0 +1,33 @@ +package v1_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + mempoolv1 
"github.com/tendermint/tendermint/test/fuzz/mempool/v1" +) + +const testdataCasesDir = "testdata/cases" + +func TestMempoolTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + mempoolv1.Fuzz(input) + }) + } +} diff --git a/test/fuzz/mempool/v1/testdata/cases/empty b/test/fuzz/mempool/v1/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go index d0dfe6530..6d5548fc7 100644 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ b/test/fuzz/p2p/addrbook/fuzz.go @@ -1,5 +1,5 @@ // nolint: gosec -package addr +package addrbook import ( "encoding/json" diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go new file mode 100644 index 000000000..4ec7aebd9 --- /dev/null +++ b/test/fuzz/p2p/addrbook/fuzz_test.go @@ -0,0 +1,33 @@ +package addrbook_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" +) + +const testdataCasesDir = "testdata/cases" + +func TestAddrbookTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + addrbook.Fuzz(input) + }) + } +} diff --git a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go new file mode 100644 index 000000000..8a194e730 --- /dev/null +++ b/test/fuzz/p2p/pex/fuzz_test.go @@ -0,0 +1,33 @@ +package pex_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/test/fuzz/p2p/pex" +) + +const testdataCasesDir = "testdata/cases" + +func TestPexTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + pex.Fuzz(input) + }) + } +} diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/test/fuzz/p2p/pex/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go new file mode 100644 index 000000000..d48dc4267 --- /dev/null +++ b/test/fuzz/p2p/secretconnection/fuzz_test.go @@ -0,0 +1,33 @@ +package secretconnection_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + 
"github.com/tendermint/tendermint/test/fuzz/p2p/secretconnection" +) + +const testdataCasesDir = "testdata/cases" + +func TestSecretConnectionTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + secretconnection.Fuzz(input) + }) + } +} diff --git a/test/fuzz/p2p/secret_connection/init-corpus/main.go b/test/fuzz/p2p/secretconnection/init-corpus/main.go similarity index 100% rename from test/fuzz/p2p/secret_connection/init-corpus/main.go rename to test/fuzz/p2p/secretconnection/init-corpus/main.go diff --git a/test/fuzz/p2p/secret_connection/read_write.go b/test/fuzz/p2p/secretconnection/read_write.go similarity index 100% rename from test/fuzz/p2p/secret_connection/read_write.go rename to test/fuzz/p2p/secretconnection/read_write.go diff --git a/test/fuzz/p2p/secretconnection/testdata/cases/empty b/test/fuzz/p2p/secretconnection/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go new file mode 100644 index 000000000..50b9194fe --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go @@ -0,0 +1,33 @@ +package server_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/test/fuzz/rpc/jsonrpc/server" +) + +const testdataCasesDir = "testdata/cases" + +func TestServerTestdataCases(t *testing.T) { + entries, err := os.ReadDir(testdataCasesDir) + require.NoError(t, err) + + for _, e := range entries { + entry := e + t.Run(entry.Name(), func(t *testing.T) { + defer func() { + r := recover() + require.Nilf(t, r, "testdata/cases test panic") + }() + f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) + require.NoError(t, err) + input, err := ioutil.ReadAll(f) + require.NoError(t, err) + server.Fuzz(input) + }) + } +} diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go index cab1b6ed0..08f7e2b6b 100644 --- a/test/fuzz/rpc/jsonrpc/server/handler.go +++ b/test/fuzz/rpc/jsonrpc/server/handler.go @@ -1,4 +1,4 @@ -package handler +package server import ( "bytes" @@ -39,11 +39,25 @@ func Fuzz(data []byte) int { if err := res.Body.Close(); err != nil { panic(err) } - if len(blob) > 0 { - recv := new(types.RPCResponse) - if err := json.Unmarshal(blob, recv); err != nil { + if len(blob) == 0 { + return 1 + } + + if outputJSONIsSlice(blob) { + recv := []types.RPCResponse{} + if err := json.Unmarshal(blob, &recv); err != nil { panic(err) } + return 1 + } + recv := &types.RPCResponse{} + if err := json.Unmarshal(blob, recv); err != nil { + panic(err) } return 1 } + +func outputJSONIsSlice(input []byte) bool { + slice := []interface{}{} + return json.Unmarshal(input, &slice) == nil +} diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d b/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d new file mode 100644 index 000000000..6e7ea636e --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git 
a/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d b/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d new file mode 100644 index 000000000..6e7ea636e --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 b/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 new file mode 100644 index 000000000..e0be2aa4b --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 @@ -0,0 +1 @@ +[0,0] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 b/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 new file mode 100644 index 000000000..0f7836d2f --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 @@ -0,0 +1 @@ +[{"iD":7},{"iD":7}] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/empty b/test/fuzz/rpc/jsonrpc/server/testdata/cases/empty new file mode 100644 index 000000000..e69de29bb diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 b/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 new file mode 100644 index 000000000..0f7836d2f --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 @@ -0,0 +1 @@ +[{"iD":7},{"iD":7}] \ No newline at end of file diff --git a/tools/tools.go b/tools/tools.go index c6f5f4c8c..0e61333ec 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -8,4 +8,5 @@ package tools import ( _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "github.com/vektra/mockery/v2" ) diff --git a/types/event_bus.go b/types/event_bus.go index 5290181ee..dfe3a0664 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -153,8 +153,8 @@ func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { return b.Publish(EventValidBlockValue, data) } -func (b *EventBus) PublishEventFastSyncStatus(data EventDataFastSyncStatus) error { - return b.Publish(EventFastSyncStatusValue, data) +func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { + return b.Publish(EventBlockSyncStatusValue, data) } func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { @@ -317,7 +317,7 @@ func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd return nil } -func (NopEventBus) PublishEventFastSyncStatus(data EventDataFastSyncStatus) error { +func (NopEventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { return nil } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 987a10eab..9ca075391 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -370,7 +370,7 @@ func TestEventBusPublish(t *testing.T) { require.NoError(t, err) err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{}) require.NoError(t, err) - err = eventBus.PublishEventFastSyncStatus(EventDataFastSyncStatus{}) + err = 
eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{}) require.NoError(t, err) err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{}) require.NoError(t, err) @@ -480,7 +480,7 @@ var events = []string{ EventRelockValue, EventTimeoutWaitValue, EventVoteValue, - EventFastSyncStatusValue, + EventBlockSyncStatusValue, EventStateSyncStatusValue, } @@ -502,7 +502,9 @@ var queries = []tmpubsub.Query{ EventQueryRelock, EventQueryTimeoutWait, EventQueryVote, - EventQueryFastSyncStatus} + EventQueryBlockSyncStatus, + EventQueryStateSyncStatus, +} func randQuery() tmpubsub.Query { return queries[mrand.Intn(len(queries))] diff --git a/types/events.go b/types/events.go index 2e234d3b7..46f150abd 100644 --- a/types/events.go +++ b/types/events.go @@ -27,9 +27,9 @@ const ( // These are used for testing the consensus state machine. // They can also be used to build real-time consensus visualizers. EventCompleteProposalValue = "CompleteProposal" - // The FastSyncStatus event will be emitted when the node switching - // state sync mechanism between the consensus reactor and the fastsync reactor. - EventFastSyncStatusValue = "FastSyncStatus" + // The BlockSyncStatus event will be emitted when the node switching + // state sync mechanism between the consensus reactor and the blocksync reactor. + EventBlockSyncStatusValue = "BlockSyncStatus" EventLockValue = "Lock" EventNewRoundValue = "NewRound" EventNewRoundStepValue = "NewRoundStep" @@ -104,7 +104,7 @@ func init() { tmjson.RegisterType(EventDataVote{}, "tendermint/event/Vote") tmjson.RegisterType(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates") tmjson.RegisterType(EventDataString(""), "tendermint/event/ProposalString") - tmjson.RegisterType(EventDataFastSyncStatus{}, "tendermint/event/FastSyncStatus") + tmjson.RegisterType(EventDataBlockSyncStatus{}, "tendermint/event/FastSyncStatus") tmjson.RegisterType(EventDataStateSyncStatus{}, "tendermint/event/StateSyncStatus") } @@ -176,9 +176,9 @@ type EventDataValidatorSetUpdates struct { ValidatorUpdates []*Validator `json:"validator_updates"` } -// EventDataFastSyncStatus shows the fastsync status and the +// EventDataBlockSyncStatus shows the fastsync status and the // height when the node state sync mechanism changes. -type EventDataFastSyncStatus struct { +type EventDataBlockSyncStatus struct { Complete bool `json:"complete"` Height int64 `json:"height"` } @@ -227,7 +227,7 @@ var ( EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdatesValue) EventQueryValidBlock = QueryForEvent(EventValidBlockValue) EventQueryVote = QueryForEvent(EventVoteValue) - EventQueryFastSyncStatus = QueryForEvent(EventFastSyncStatusValue) + EventQueryBlockSyncStatus = QueryForEvent(EventBlockSyncStatusValue) EventQueryStateSyncStatus = QueryForEvent(EventStateSyncStatusValue) ) diff --git a/types/node_key.go b/types/node_key.go index b8277649a..547fa1696 100644 --- a/types/node_key.go +++ b/types/node_key.go @@ -33,11 +33,7 @@ func (nodeKey NodeKey) SaveAs(filePath string) error { if err != nil { return err } - err = ioutil.WriteFile(filePath, jsonBytes, 0600) - if err != nil { - return err - } - return nil + return ioutil.WriteFile(filePath, jsonBytes, 0600) } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If