diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9eb8021c7..e155fc894 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,19 +7,17 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @alexanderbez @ebuchman @melekes @tessr +* @alexanderbez @ebuchman @tessr # Overrides for tooling packages -.github/ @marbar3778 @alexanderbez @ebuchman @melekes @tessr -DOCKER/ @marbar3778 @alexanderbez @ebuchman @melekes @tessr +.github/ @marbar3778 @alexanderbez @ebuchman @tessr +DOCKER/ @marbar3778 @alexanderbez @ebuchman @tessr # Overrides for core Tendermint packages -abci/ @marbar3778 @alexanderbez @ebuchman @melekes @tessr -evidence/ @cmwaters @ebuchman @melekes @tessr -light/ @cmwaters @melekes @ebuchman @tessr +abci/ @marbar3778 @alexanderbez @ebuchman @tessr +evidence/ @cmwaters @ebuchman @tessr +light/ @cmwaters @ebuchman @tessr # Overrides for docs -*.md @marbar3778 @alexanderbez @ebuchman @melekes @tessr -docs/ @marbar3778 @alexanderbez @ebuchman @melekes @tessr - - +*.md @marbar3778 @alexanderbez @ebuchman @tessr +docs/ @marbar3778 @alexanderbez @ebuchman @tessr diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 76eac7658..99417fc0c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -121,7 +121,7 @@ jobs: - run: | cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v1.2.2 + - uses: codecov/codecov-action@v1.3.1 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index c8fa53148..8e4fcbdad 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -23,7 +23,7 @@ jobs: - uses: golangci/golangci-lint-action@v2.5.1 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.31 + version: v1.38 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index a773c2db9..e432b9293 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -51,6 +51,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi ### FEATURES +- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam ### IMPROVEMENTS - [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) @@ -76,6 +78,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - [types] \#6120 use batch verification for verifying commits signatures. - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature. - [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. +- [privval] \#6240 Add `context.Context` to privval interface. ### BUG FIXES diff --git a/UPGRADING.md b/UPGRADING.md index aa34d37d6..ce8c2a2a6 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -17,7 +17,10 @@ This guide provides instructions for upgrading to specific versions of Tendermin * `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure - you have updated all the variables in your `config.toml` file. + you have updated all the variables in your `config.toml` file. + +* Added `--mode` flag and `mode` config variable on `config.toml` for setting the mode of the node: `full` | `validator` | `seed` (default: `full`) + [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) ### CLI Changes @@ -30,7 +33,7 @@ This guide provides instructions for upgrading to specific versions of Tendermin $ tendermint gen_node_key > $TMHOME/config/node_key.json ``` -* CLI commands and flags are all now hyphen-case instead of snake_case. +* CLI commands and flags are all now hyphen-case instead of snake_case. Make sure to adjust any scripts that call a CLI command using snake_case. ## v0.34.0 diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index d84d76549..f3a19ba11 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -64,14 +64,17 @@ type mockBlockStore struct { blocks map[uint64]*types.Block } +//nolint:unused func (ml *mockBlockStore) Height() int64 { return int64(len(ml.blocks)) } +//nolint:unused func (ml *mockBlockStore) LoadBlock(height uint64) *types.Block { return ml.blocks[height] } +//nolint:unused func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { ml.blocks[block.Height] = block } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index 51578253b..efe0f0244 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -1,6 +1,7 @@ package commands import ( + "context" "fmt" "github.com/spf13/cobra" @@ -86,7 +87,11 @@ func initFilesWithConfig(config *cfg.Config) error { PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, } } - pubKey, err := pv.GetPubKey() + + ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + defer cancel() + + pubKey, err := pv.GetPubKey(ctx) if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 6dba1180d..7d25cdbff 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "strings" + "time" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -15,8 +16,9 @@ import ( ) var ( - config = cfg.DefaultConfig() - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + config = cfg.DefaultConfig() + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + ctxTimeout = 4 * time.Second ) func init() { diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 2bec0f393..44fac7c11 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -24,6 +24,9 @@ func AddNodeFlags(cmd *cobra.Command) { // bind flags cmd.Flags().String("moniker", config.Moniker, "node name") + // mode flags + cmd.Flags().String("mode", config.Mode, "node mode (full | validator | seed)") + // priv val flags cmd.Flags().String( "priv-validator-laddr", @@ -71,7 +74,6 @@ func AddNodeFlags(cmd *cobra.Command) { config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") - cmd.Flags().Bool("p2p.seed-mode",
config.P2P.SeedMode, "enable/disable seed mode") cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") // consensus flags diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 3c83a4120..59cc5bbfc 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -1,6 +1,7 @@ package commands import ( + "context" "fmt" "github.com/spf13/cobra" @@ -36,7 +37,11 @@ func showValidator(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("can't connect to remote validator %w", err) } - pubKey, err = pvsc.GetPubKey() + + ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + defer cancel() + + pubKey, err = pvsc.GetPubKey(ctx) if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } @@ -52,7 +57,10 @@ func showValidator(cmd *cobra.Command, args []string) error { return err } - pubKey, err = pv.GetPubKey() + ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + defer cancel() + + pubKey, err = pv.GetPubKey(ctx) if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index 3910de0f7..b61c66c48 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -1,6 +1,7 @@ package commands import ( + "context" "fmt" "net" "os" @@ -104,7 +105,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error { ) } + // set mode to validator for testnet config := cfg.DefaultConfig() + config.Mode = cfg.ModeValidator // overwrite default config if set and valid if configFile != "" { @@ -149,7 +152,10 @@ func testnetFiles(cmd *cobra.Command, args []string) error { return err } - pubKey, err := pv.GetPubKey() + ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + defer cancel() + + pubKey, err := pv.GetPubKey(ctx) if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } diff --git a/config/config.go b/config/config.go index f2aa4cc17..e4ccc4e7b 100644 --- a/config/config.go +++ b/config/config.go @@ -23,6 +23,13 @@ const ( // DefaultLogLevel defines a default log level as INFO. 
DefaultLogLevel = "info" + + ModeFull = "full" + ModeValidator = "validator" + ModeSeed = "seed" + + BlockchainV0 = "v0" + BlockchainV2 = "v2" ) // NOTE: Most of the structs & relevant comments + the @@ -39,6 +46,7 @@ var ( defaultConfigFileName = "config.toml" defaultGenesisJSONName = "genesis.json" + defaultMode = ModeFull defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" @@ -159,6 +167,18 @@ type BaseConfig struct { //nolint: maligned // A custom human readable name for this node Moniker string `mapstructure:"moniker"` + // Mode of Node: full | validator | seed (default: "full") + // * full (default) + // - all reactors + // - No priv_validator_key.json, priv_validator_state.json + // * validator + // - all reactors + // - with priv_validator_key.json, priv_validator_state.json + // * seed + // - only P2P, PEX Reactor + // - No priv_validator_key.json, priv_validator_state.json + Mode string `mapstructure:"mode"` + // If this node is many blocks behind the tip of the chain, FastSync // allows them to catchup quickly by downloading blocks in parallel // and verifying their commits @@ -235,6 +255,7 @@ func DefaultBaseConfig() BaseConfig { PrivValidatorKey: defaultPrivValKeyPath, PrivValidatorState: defaultPrivValStatePath, NodeKey: defaultNodeKeyPath, + Mode: defaultMode, Moniker: defaultMoniker, ProxyApp: "tcp://127.0.0.1:26658", ABCI: "socket", @@ -251,6 +272,7 @@ func DefaultBaseConfig() BaseConfig { func TestBaseConfig() BaseConfig { cfg := DefaultBaseConfig() cfg.chainID = "tendermint_test" + cfg.Mode = ModeValidator cfg.ProxyApp = "kvstore" cfg.FastSyncMode = false cfg.DBBackend = "memdb" @@ -322,6 +344,11 @@ func (cfg BaseConfig) ValidateBasic() error { default: return errors.New("unknown log format (must be 'plain' or 'json')") } + switch cfg.Mode { + case ModeFull, ModeValidator, ModeSeed: + default: + return fmt.Errorf("unknown mode: %v", cfg.Mode) + } return nil } @@ -557,12 +584,6 @@ type P2PConfig struct { //nolint: maligned // Set true to enable the peer-exchange reactor PexReactor bool `mapstructure:"pex"` - // Seed mode, in which node constantly crawls the network and looks for - // peers. If another node asks it for addresses, it responds and disconnects. - // - // Does not work if the peer-exchange reactor is disabled. - SeedMode bool `mapstructure:"seed-mode"` - // Comma separated list of peer IDs to keep private (will not be gossiped to // other peers) PrivatePeerIDs string `mapstructure:"private-peer-ids"` @@ -600,7 +621,6 @@ func DefaultP2PConfig() *P2PConfig { SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s PexReactor: true, - SeedMode: false, AllowDuplicateIP: false, HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, @@ -807,7 +827,7 @@ type FastSyncConfig struct { // DefaultFastSyncConfig returns a default configuration for the fast sync service func DefaultFastSyncConfig() *FastSyncConfig { return &FastSyncConfig{ - Version: "v0", + Version: BlockchainV0, } } @@ -819,9 +839,9 @@ func TestFastSyncConfig() *FastSyncConfig { // ValidateBasic performs basic validation. 
func (cfg *FastSyncConfig) ValidateBasic() error { switch cfg.Version { - case "v0": + case BlockchainV0: return nil - case "v2": + case BlockchainV2: return nil default: return fmt.Errorf("unknown fastsync version %s", cfg.Version) @@ -838,6 +858,7 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal-file"` walFile string // overrides WalPath if set + // TODO: remove timeout configs, these should be global not local // How long we wait for a proposal block before prevoting nil TimeoutPropose time.Duration `mapstructure:"timeout-propose"` // How much timeout-propose increases with each round diff --git a/config/toml.go b/config/toml.go index efa1152dc..f98a35198 100644 --- a/config/toml.go +++ b/config/toml.go @@ -88,6 +88,19 @@ proxy-app = "{{ .BaseConfig.ProxyApp }}" # A custom human readable name for this node moniker = "{{ .BaseConfig.Moniker }}" +# Mode of Node: full | validator | seed (default: "full") +# You will need to set it to "validator" if you want to run the node as a validator +# * full node (default) +# - all reactors +# - No priv_validator_key.json, priv_validator_state.json +# * validator node +# - all reactors +# - with priv_validator_key.json, priv_validator_state.json +# * seed node +# - only P2P, PEX Reactor +# - No priv_validator_key.json, priv_validator_state.json +mode = "{{ .BaseConfig.Mode }}" + # If this node is many blocks behind the tip of the chain, FastSync # allows them to catchup quickly by downloading blocks in parallel # and verifying their commits @@ -305,12 +318,6 @@ recv-rate = {{ .P2P.RecvRate }} # Set true to enable the peer-exchange reactor pex = {{ .P2P.PexReactor }} -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed-mode = {{ .P2P.SeedMode }} - # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index dbc0908f8..7f040e46e 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "fmt" "os" "path" @@ -39,57 +40,59 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { states := make([]*State, nValidators) for i := 0; i < nValidators; i++ { - logger := consensusLogger().With("test", "byzantine", "validator", i) - stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) - state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) + func() { + logger := consensusLogger().With("test", "byzantine", "validator", i) + stateDB := dbm.NewMemDB() // each state needs its own db + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) - defer os.RemoveAll(thisConfig.RootDir) + thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() - vals := types.TM2PB.ValidatorUpdates(state.Validators) - app.InitChain(abci.RequestInitChain{Validators: vals}) + ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + app := appFunc() + vals := types.TM2PB.ValidatorUpdates(state.Validators) + app.InitChain(abci.RequestInitChain{Validators: vals}) - blockDB := dbm.NewMemDB() - blockStore := store.NewBlockStore(blockDB) + blockDB := dbm.NewMemDB() + blockStore := store.NewBlockStore(blockDB) - // one for mempool, one for consensus - mtx := new(tmsync.Mutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + // one for mempool, one for consensus + mtx := new(tmsync.Mutex) + proxyAppConnMem := abcicli.NewLocalClient(mtx, app) + proxyAppConnCon := abcicli.NewLocalClient(mtx, app) - // Make Mempool - mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) - if thisConfig.Consensus.WaitForTxs() { - mempool.EnableTxsAvailable() - } + // Make Mempool + mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + if thisConfig.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() + } - // Make a full instance of the evidence pool - evidenceDB := dbm.NewMemDB() - evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore) - require.NoError(t, err) + // Make a full instance of the evidence pool + evidenceDB := dbm.NewMemDB() + evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore) + require.NoError(t, err) - // Make State - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(cs.Logger) - // set private validator - pv := privVals[i] - cs.SetPrivValidator(pv) + // Make State + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) + 
cs.SetLogger(cs.Logger) + // set private validator + pv := privVals[i] + cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() - require.NoError(t, err) - cs.SetEventBus(eventBus) + eventBus := types.NewEventBus() + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + err = eventBus.Start() + require.NoError(t, err) + cs.SetEventBus(eventBus) - cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger) + cs.SetTimeoutTicker(tickerFunc()) + cs.SetLogger(logger) - states[i] = cs + states[i] = cs + }() } rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock @@ -200,7 +203,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID) p := proposal.ToProto() - if err := lazyNodeState.privValidator.SignProposal(lazyNodeState.state.ChainID, p); err == nil { + if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue @@ -232,11 +235,17 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { go func(j int, s types.Subscription) { defer wg.Done() - - for msg := range s.Out() { - block := msg.Data().(types.EventDataNewBlock).Block - if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] + for { + select { + case msg := <-s.Out(): + require.NotNil(t, msg) + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence.Evidence[0] + return + } + case <-s.Canceled(): + require.Fail(t, "subscription failed for %d", j) return } } @@ -247,7 +256,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { wg.Wait() - pubkey, err := bzNodeState.privValidator.GetPubKey() + pubkey, err := bzNodeState.privValidator.GetPubKey(context.Background()) require.NoError(t, err) for idx, ev := range evidenceFromEachValidator { diff --git a/consensus/common_test.go b/consensus/common_test.go index eb0b346f6..ad4a9ad28 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -108,7 +108,7 @@ func (vs *validatorStub) signVote( hash []byte, header types.PartSetHeader) (*types.Vote, error) { - pubKey, err := vs.PrivValidator.GetPubKey() + pubKey, err := vs.PrivValidator.GetPubKey(context.Background()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -123,7 +123,7 @@ func (vs *validatorStub) signVote( BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() - err = vs.PrivValidator.SignVote(config.ChainID(), v) + err = vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v) vote.Signature = v.Signature return vote, err @@ -169,11 +169,11 @@ func (vss ValidatorStubsByPower) Len() int { } func (vss ValidatorStubsByPower) Less(i, j int) bool { - vssi, err := vss[i].GetPubKey() + vssi, err := vss[i].GetPubKey(context.Background()) if err != nil { panic(err) } - vssj, err := vss[j].GetPubKey() + vssj, err := vss[j].GetPubKey(context.Background()) if err != nil { panic(err) } @@ -220,7 +220,7 @@ func decideProposal( polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal = types.NewProposal(height, round, polRound, propBlockID) 
p := proposal.ToProto() - if err := vs.SignProposal(chainID, p); err != nil { + if err := vs.SignProposal(context.Background(), chainID, p); err != nil { panic(err) } @@ -248,7 +248,7 @@ func signAddVotes( func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { prevotes := cs.Votes.Prevotes(round) - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pubKey.Address() var vote *types.Vote @@ -268,7 +268,7 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { votes := cs.LastCommit - pv, err := privVal.GetPubKey() + pv, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pv.Address() var vote *types.Vote @@ -290,7 +290,7 @@ func validatePrecommit( lockedBlockHash []byte, ) { precommits := cs.Votes.Precommits(thisRound) - pv, err := privVal.GetPubKey() + pv, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pv.Address() var vote *types.Vote diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index ff45b9060..315449a50 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "sync" "testing" @@ -75,7 +76,7 @@ func invalidDoPrevoteFunc(t *testing.T, height uint64, round int32, cs *State, r cs.mtx.Lock() cs.privValidator = pv - pubKey, err := cs.privValidator.GetPubKey() + pubKey, err := cs.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() @@ -96,7 +97,7 @@ func invalidDoPrevoteFunc(t *testing.T, height uint64, round int32, cs *State, r } p := precommit.ToProto() - err = cs.privValidator.SignVote(cs.state.ChainID, p) + err = cs.privValidator.SignVote(context.Background(), cs.state.ChainID, p) require.NoError(t, err) precommit.Signature = p.Signature diff --git a/consensus/msgs_test.go b/consensus/msgs_test.go index 6507cb76f..c82bf69a5 100644 --- a/consensus/msgs_test.go +++ b/consensus/msgs_test.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/hex" "fmt" "math" @@ -62,7 +63,7 @@ func TestMsgToProto(t *testing.T) { pbProposal := proposal.ToProto() pv := types.NewMockPV() - pk, err := pv.GetPubKey() + pk, err := pv.GetPubKey(context.Background()) require.NoError(t, err) val := types.NewValidator(pk, 100) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 21252f23c..c0346d903 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -488,7 +488,7 @@ func TestReactorVotingPowerChange(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < n; i++ { - pubKey, err := states[i].privValidator.GetPubKey() + pubKey, err := states[i].privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() @@ -513,7 +513,7 @@ func TestReactorVotingPowerChange(t *testing.T) { blocksSubs = append(blocksSubs, sub) } - val1PubKey, err := states[0].privValidator.GetPubKey() + val1PubKey, err := states[0].privValidator.GetPubKey(context.Background()) require.NoError(t, err) val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey) @@ -588,7 +588,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - pubKey, err := states[i].privValidator.GetPubKey() + pubKey, 
err := states[i].privValidator.GetPubKey(context.Background()) require.NoError(t, err) activeVals[string(pubKey.Address())] = struct{}{} @@ -607,7 +607,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Wait() - newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) @@ -640,7 +640,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { // it includes the commit for block 4, which should have the updated validator set waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) - updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) @@ -660,7 +660,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { previousTotalVotingPower, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), ) - newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background()) require.NoError(t, err) newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) @@ -668,7 +668,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background()) require.NoError(t, err) newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 0d5cc6855..1c7fa5951 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -343,7 +343,7 @@ func TestSimulateValidatorsChange(t *testing.T) { // HEIGHT 2 height++ incrementHeight(vss...) - newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) @@ -356,7 +356,7 @@ func TestSimulateValidatorsChange(t *testing.T) { proposal := types.NewProposal(vss[1].Height, round, -1, blockID) p := proposal.ToProto() - if err := vss[1].SignProposal(config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -373,7 +373,7 @@ func TestSimulateValidatorsChange(t *testing.T) { // HEIGHT 3 height++ incrementHeight(vss...) 
- updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) @@ -386,7 +386,7 @@ func TestSimulateValidatorsChange(t *testing.T) { proposal = types.NewProposal(vss[2].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[2].SignProposal(config.ChainID(), p); err != nil { + if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -403,14 +403,14 @@ func TestSimulateValidatorsChange(t *testing.T) { // HEIGHT 4 height++ incrementHeight(vss...) - newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background()) require.NoError(t, err) newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{}) assert.Nil(t, err) - newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background()) require.NoError(t, err) newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) @@ -426,10 +426,10 @@ func TestSimulateValidatorsChange(t *testing.T) { valIndexFn := func(cssIdx int) int { for i, vs := range newVss { - vsPubKey, err := vs.GetPubKey() + vsPubKey, err := vs.GetPubKey(context.Background()) require.NoError(t, err) - cssPubKey, err := css[cssIdx].privValidator.GetPubKey() + cssPubKey, err := css[cssIdx].privValidator.GetPubKey(context.Background()) require.NoError(t, err) if vsPubKey.Equals(cssPubKey) { @@ -443,7 +443,7 @@ func TestSimulateValidatorsChange(t *testing.T) { proposal = types.NewProposal(vss[3].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[3].SignProposal(config.ChainID(), p); err != nil { + if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -502,7 +502,7 @@ func TestSimulateValidatorsChange(t *testing.T) { selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[1].SignProposal(config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -687,7 +687,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin }) chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) @@ -888,7 +888,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { privVal, err := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) require.NoError(t, err) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 
appVersion) stateStore := sm.NewStore(stateDB) @@ -1224,7 +1224,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { privVal, err := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) require.NoError(t, err) - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 0x0) stateStore := sm.NewStore(stateDB) diff --git a/consensus/state.go b/consensus/state.go index d9e71fb44..3bd044051 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -1136,7 +1137,11 @@ func (cs *State) defaultDecideProposal(height uint64, round int32) { propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) p := proposal.ToProto() - if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { + + // wait the max amount we would wait for a proposal + ctx, cancel := context.WithTimeout(context.TODO(), cs.config.TimeoutPropose) + defer cancel() + if err := cs.privValidator.SignProposal(ctx, cs.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue @@ -2181,7 +2186,24 @@ func (cs *State) signVote( } v := vote.ToProto() - err := cs.privValidator.SignVote(cs.state.ChainID, v) + + // If the signedMessageType is a precommit, use our local precommit timeout + // as the max wait time for getting a signed vote. The same goes for prevote. + var timeout time.Duration + + switch msgType { + case tmproto.PrecommitType: + timeout = cs.config.TimeoutPrecommit + case tmproto.PrevoteType: + timeout = cs.config.TimeoutPrevote + default: + timeout = time.Second + } + + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + + err := cs.privValidator.SignVote(ctx, cs.state.ChainID, v) vote.Signature = v.Signature return vote, err @@ -2248,7 +2270,17 @@ func (cs *State) updatePrivValidatorPubKey() error { return nil } - pubKey, err := cs.privValidator.GetPubKey() + var timeout time.Duration + if cs.config.TimeoutPrecommit > cs.config.TimeoutPrevote { + timeout = cs.config.TimeoutPrecommit + } else { + timeout = cs.config.TimeoutPrevote + } + + // use the larger of the prevote and precommit timeouts as a hard cap, so a + // slow or hung remote signer connection cannot block consensus indefinitely + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + pubKey, err := cs.privValidator.GetPubKey(ctx) if err != nil { return err } diff --git a/consensus/state_test.go b/consensus/state_test.go index a6c013c4e..de0f6cc09 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -71,7 +71,7 @@ func TestStateProposerSelection0(t *testing.T) { // Commit a block and ensure proposer for the next height is correct.
prop := cs1.GetRoundState().Validators.GetProposer() - pv, err := cs1.privValidator.GetPubKey() + pv, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) address := pv.Address() if !bytes.Equal(prop.Address, address) { @@ -88,7 +88,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() - pv1, err := vss[1].GetPubKey() + pv1, err := vss[1].GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() if !bytes.Equal(prop.Address, addr) { @@ -116,7 +116,7 @@ func TestStateProposerSelection2(t *testing.T) { // everyone just votes nil. we get a new proposer each round for i := int32(0); int(i) < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - pvk, err := vss[int(i+round)%len(vss)].GetPubKey() + pvk, err := vss[int(i+round)%len(vss)].GetPubKey(context.Background()) require.NoError(t, err) addr := pvk.Address() correctProposer := addr @@ -218,7 +218,7 @@ func TestStateBadProposal(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(vs2.Height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(config.ChainID(), p); err != nil { + if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } @@ -274,7 +274,7 @@ func TestStateOversizedBlock(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(config.ChainID(), p); err != nil { + if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -616,7 +616,7 @@ func TestStateLockPOLRelock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -717,7 +717,7 @@ func TestStateLockPOLUnlock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -809,7 +809,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -939,7 +939,7 @@ func TestStateLockPOLSafety1(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) 
require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1060,7 +1060,7 @@ func TestStateLockPOLSafety2(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1115,7 +1115,7 @@ func TestStateLockPOLSafety2(t *testing.T) { // in round 2 we see the polkad block from round 0 newProp := types.NewProposal(height, round, 0, propBlockID0) p := newProp.ToProto() - if err := vs3.SignProposal(config.ChainID(), p); err != nil { + if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal(err) } @@ -1160,7 +1160,7 @@ func TestProposeValidBlock(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1251,7 +1251,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1315,7 +1315,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1392,7 +1392,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1430,7 +1430,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1468,7 +1468,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ 
-1595,7 +1595,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1657,7 +1657,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) @@ -1798,7 +1798,7 @@ func TestStateHalt1(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - pv1, err := cs1.privValidator.GetPubKey() + pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 8951ad1b1..15cd9a222 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "fmt" "os" "testing" @@ -57,7 +58,7 @@ func TestPeerCatchupRounds(t *testing.T) { func makeVoteHR(t *testing.T, height uint64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote { privVal := privVals[valIndex] - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) if err != nil { panic(err) } @@ -76,7 +77,7 @@ func makeVoteHR(t *testing.T, height uint64, valIndex, round int32, privVals []t chainID := config.ChainID() v := vote.ToProto() - err = privVal.SignVote(chainID, v) + err = privVal.SignVote(context.Background(), chainID, v) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 6872816d6..ae786895a 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -89,7 +89,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) - if privValidator != nil { + if privValidator != nil && privValidator != (*privval.FilePV)(nil) { consensusState.SetPrivValidator(privValidator) } // END OF COPY PASTE diff --git a/crypto/hash.go b/crypto/hash.go index dd1b4c1dd..e1d22523f 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -6,6 +6,6 @@ import ( func Sha256(bytes []byte) []byte { hasher := sha256.New() - hasher.Write(bytes) //nolint:errcheck // ignore error + hasher.Write(bytes) return hasher.Sum(nil) } diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index b613ebe31..842dc8201 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -80,7 +80,7 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { } value := args[0] hasher := tmhash.New() - hasher.Write(value) //nolint: errcheck // does not error + hasher.Write(value) vhash := hasher.Sum(nil) bz := new(bytes.Buffer) diff --git 
a/docs/architecture/adr-052-tendermint-mode.md b/docs/architecture/adr-052-tendermint-mode.md index acd5028b4..344c68a5b 100644 --- a/docs/architecture/adr-052-tendermint-mode.md +++ b/docs/architecture/adr-052-tendermint-mode.md @@ -7,16 +7,16 @@ ## Context -- Fullnode mode: fullnode mode does not have the capability to become a validator. +- Full mode: full mode does not have the capability to become a validator. - Validator mode : this mode is exactly same as existing state machine behavior. sync without voting on consensus, and participate consensus when fully synced -- Seed mode : lightweight seed mode maintaining an address book, p2p like [TenderSeed](https://gitlab.com/polychainlabs/tenderseed) +- Seed mode : lightweight seed node maintaining an address book, p2p like [TenderSeed](https://gitlab.com/polychainlabs/tenderseed) ## Decision We would like to suggest a simple Tendermint mode abstraction. These modes will live under one binary, and when initializing a node the user will be able to specify which node they would like to create. - Which reactor, component to include for each node - - fullnode *(default)* + - full *(default)* - switch, transport - reactors - mempool @@ -24,6 +24,7 @@ We would like to suggest a simple Tendermint mode abstraction. These modes will - evidence - blockchain - p2p/pex + - statesync - rpc (safe connections only) - *~~no privValidator(priv_validator_key.json, priv_validator_state.json)~~* - validator @@ -33,7 +34,8 @@ We would like to suggest a simple Tendermint mode abstraction. These modes will - consensus - evidence - blockchain -  - p2p/pex + - p2p/pex + - statesync - rpc (safe connections only) - with privValidator(priv_validator_key.json, priv_validator_state.json) - seed @@ -44,17 +46,17 @@ We would like to suggest a simple Tendermint mode abstraction. 
These modes will - We would like to suggest by introducing `mode` parameter in `config.toml` and cli - `mode = "{{ .BaseConfig.Mode }}"` in `config.toml` - `tendermint node --mode validator` in cli - - fullnode | validator | seed (default: "fullnode") + - full | validator | seed (default: "full") - RPC modification - `host:26657/status` - - return empty `validator_info` when fullnode mode - - no rpc server in seed mode + - return empty `validator_info` when in full mode + - no RPC server in seed mode - Where to modify in codebase - Add switch for `config.Mode` on `node/node.go:DefaultNewNode` - If `config.Mode==validator`, call default `NewNode` (current logic) - - If `config.Mode==fullnode`, call `NewNode` with `nil` `privValidator` (do not load or generation) + - If `config.Mode==full`, call `NewNode` with `nil` `privValidator` (do not load or generate) - Need to add exception routine for `nil` `privValidator` to related functions - - If `config.Mode==seed`, call `NewSeedNode` (seed version of `node/node.go:NewNode`) + - If `config.Mode==seed`, call `NewSeedNode` (seed node version of `node/node.go:NewNode`) - Need to add exception routine for `nil` `reactor`, `component` to related functions ## Status diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index 30d22234f..eaf448c7e 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -41,6 +41,19 @@ moniker = "anonymous" # and verifying their commits fast-sync = true +# Mode of Node: full | validator | seed +# You will need to set it to "validator" if you want to run the node as a validator +# * full node (default) +# - all reactors +# - No priv_validator_key.json, priv_validator_state.json +# * validator node +# - all reactors +# - with priv_validator_key.json, priv_validator_state.json +# * seed node +# - only P2P, PEX Reactor +# - No priv_validator_key.json, priv_validator_state.json +mode = "full" + # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -242,12 +255,6 @@ recv-rate = 5120000 # Set true to enable the peer-exchange reactor pex = true -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed-mode = false - # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private-peer-ids = "" @@ -494,5 +501,4 @@ This section will cover settings within the p2p section of the `config.toml`. - `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection). - `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. - `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on. -- `seed-mode` = is used for when node operators want to run their node as a seed node.
Seed node's run a variation of the PeX protocol that disconnects from peers after sending them a list of peers to connect to. To minimize the servers usage, it is recommended to set the mempool's size to 0. - `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id. diff --git a/docs/nodes/validators.md b/docs/nodes/validators.md index 191a84cb2..b787fa8a4 100644 --- a/docs/nodes/validators.md +++ b/docs/nodes/validators.md @@ -56,6 +56,7 @@ The validator will only talk to the sentry that are provided, the sentry nodes w When initializing nodes there are five parameters in the `config.toml` that may need to be altered. +- `mode:` (full | validator | seed) Mode of the node (default: 'full'). If you want to run the node as a validator, change it to 'validator'. - `pex:` boolean. This turns the peer exchange reactor on or off for a node. When `pex=false`, only the `persistent-peers` list is available for connection. - `persistent-peers:` a comma separated list of `nodeID@ip:port` values that define a list of peers that are expected to be online at all times. This is necessary at first startup because by setting `pex=false` the node will not be able to join the network. - `unconditional-peer-ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. This is useful for when sentry nodes have full address books. @@ -67,6 +68,7 @@ When initializing nodes there are five parameters in the `config.toml` that may | Config Option | Setting | | ------------------------ | -------------------------- | +| mode | validator | | pex | false | | persistent-peers | list of sentry nodes | | private-peer-ids | none | @@ -74,12 +76,13 @@ When initializing nodes there are five parameters in the `config.toml` that may | addr-book-strict | false | | double-sign-check-height | 10 | -The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have a full address books. +To run the node as a validator, ensure `mode=validator`. The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because it will not have a full address book.
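ADR-052, referenced above, describes the dispatch this `mode` setting drives: node construction switches on `config.Mode`. Below is a minimal, hedged sketch of that dispatch; the `node` type and the three `make*Node` constructors are hypothetical stand-ins (only the mode constants and the `unknown mode` error shape come from this change):

```go
package main

import "fmt"

// Mode constants mirroring those added to config/config.go in this change.
const (
	ModeFull      = "full"
	ModeValidator = "validator"
	ModeSeed      = "seed"
)

// node and the constructors below are hypothetical stand-ins for node.Node
// and the per-mode setup paths sketched in ADR-052.
type node struct{ mode string }

func makeFullNode() *node      { return &node{ModeFull} }      // all reactors, nil privValidator
func makeValidatorNode() *node { return &node{ModeValidator} } // loads priv_validator_key.json
func makeSeedNode() *node      { return &node{ModeSeed} }      // P2P/PEX only, no RPC server

// newNodeForMode sketches the switch ADR-052 proposes for
// node/node.go:DefaultNewNode.
func newNodeForMode(mode string) (*node, error) {
	switch mode {
	case ModeValidator:
		return makeValidatorNode(), nil
	case ModeFull:
		return makeFullNode(), nil
	case ModeSeed:
		return makeSeedNode(), nil
	default:
		// same error shape as the BaseConfig.ValidateBasic case added here
		return nil, fmt.Errorf("unknown mode: %v", mode)
	}
}

func main() {
	for _, m := range []string{ModeValidator, "fullnode"} {
		n, err := newNodeForMode(m)
		fmt.Println(m, n, err) // "fullnode" is rejected: it was renamed to "full"
	}
}
```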
#### Sentry Node Configuration | Config Option | Setting | | ---------------------- | --------------------------------------------- | +| mode | full | | pex | true | | persistent-peers | validator node, optionally other sentry nodes | | private-peer-ids | validator node ID | diff --git a/evidence/pool_test.go b/evidence/pool_test.go index ffb0e582b..a549d2bdf 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -1,6 +1,7 @@ package evidence_test import ( + "context" "testing" "time" @@ -438,7 +439,7 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h } func initializeValidatorState(t *testing.T, privVal types.PrivValidator, height uint64) sm.Store { - pubKey, _ := privVal.GetPubKey() + pubKey, _ := privVal.GetPubKey(context.Background()) validator := &types.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey} // create validator set and state diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 45ae0c079..9fb832806 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -2,12 +2,13 @@ package evidence_test import ( "encoding/hex" - "fmt" "math/rand" "sync" "testing" "time" + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -19,6 +20,7 @@ import ( "github.com/tendermint/tendermint/evidence/mocks" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/p2ptest" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -31,138 +33,190 @@ var ( ) type reactorTestSuite struct { - reactor *evidence.Reactor - pool *evidence.Pool - - peerID p2p.NodeID - - evidenceChannel *p2p.Channel - evidenceInCh chan p2p.Envelope - evidenceOutCh chan p2p.Envelope - evidencePeerErrCh chan p2p.PeerError - - peerUpdatesCh chan p2p.PeerUpdate - peerUpdates *p2p.PeerUpdates + network *p2ptest.Network + logger log.Logger + reactors map[p2p.NodeID]*evidence.Reactor + pools map[p2p.NodeID]*evidence.Pool + evidenceChannels map[p2p.NodeID]*p2p.Channel + peerUpdates map[p2p.NodeID]*p2p.PeerUpdates + peerChans map[p2p.NodeID]chan p2p.PeerUpdate + nodes []*p2ptest.Node + numStateStores int } -func setup(t *testing.T, logger log.Logger, pool *evidence.Pool, chBuf uint) *reactorTestSuite { +func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { t.Helper() pID := make([]byte, 16) _, err := rng.Read(pID) require.NoError(t, err) - peerUpdatesCh := make(chan p2p.PeerUpdate) - + numStateStores := len(stateStores) rts := &reactorTestSuite{ - pool: pool, - evidenceInCh: make(chan p2p.Envelope, chBuf), - evidenceOutCh: make(chan p2p.Envelope, chBuf), - evidencePeerErrCh: make(chan p2p.PeerError, chBuf), - peerUpdatesCh: peerUpdatesCh, - peerUpdates: p2p.NewPeerUpdates(peerUpdatesCh), - peerID: p2p.NodeID(fmt.Sprintf("%x", pID)), + numStateStores: numStateStores, + logger: log.TestingLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(t, numStateStores), + reactors: make(map[p2p.NodeID]*evidence.Reactor, numStateStores), + pools: make(map[p2p.NodeID]*evidence.Pool, numStateStores), + peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numStateStores), + peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numStateStores), } - rts.evidenceChannel = p2p.NewChannel( + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, 
evidence.EvidenceChannel, new(tmproto.EvidenceList), - rts.evidenceInCh, - rts.evidenceOutCh, - rts.evidencePeerErrCh, - ) + int(chBuf)) + require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) - rts.reactor = evidence.NewReactor( - logger, - rts.evidenceChannel, - rts.peerUpdates, - pool, - ) + idx := 0 + evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + for nodeID := range rts.network.Nodes { + logger := rts.logger.With("validator", idx) + evidenceDB := dbm.NewMemDB() + blockStore := &mocks.BlockStore{} + state, _ := stateStores[idx].Load() + blockStore.On("LoadBlockMeta", mock.AnythingOfType("uint64")).Return(func(h uint64) *types.BlockMeta { + if h <= state.LastBlockHeight { + return &types.BlockMeta{Header: types.Header{Time: evidenceTime}} + } + return nil + }) + rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore) - require.NoError(t, rts.reactor.Start()) - require.True(t, rts.reactor.IsRunning()) + require.NoError(t, err) + + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID]) + rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID]) + + rts.reactors[nodeID] = evidence.NewReactor(logger, + rts.evidenceChannels[nodeID], + rts.peerUpdates[nodeID], + rts.pools[nodeID]) + + require.NoError(t, rts.reactors[nodeID].Start()) + require.True(t, rts.reactors[nodeID].IsRunning()) + + idx++ + } t.Cleanup(func() { - require.NoError(t, rts.reactor.Stop()) - require.False(t, rts.reactor.IsRunning()) + for _, r := range rts.reactors { + if r.IsRunning() { + require.NoError(t, r.Stop()) + require.False(t, r.IsRunning()) + } + } + + leaktest.Check(t) }) return rts } -func createTestSuites(t *testing.T, stateStores []sm.Store, chBuf uint) []*reactorTestSuite { - t.Helper() - - numSStores := len(stateStores) - testSuites := make([]*reactorTestSuite, numSStores) - evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) - - for i := 0; i < numSStores; i++ { - logger := log.TestingLogger().With("validator", i) - evidenceDB := dbm.NewMemDB() - blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", mock.AnythingOfType("uint64")).Return( - &types.BlockMeta{Header: types.Header{Time: evidenceTime}}, - ) - - pool, err := evidence.NewPool(logger, evidenceDB, stateStores[i], blockStore) - require.NoError(t, err) - - testSuites[i] = setup(t, logger, pool, chBuf) - } - - return testSuites +func (rts *reactorTestSuite) start(t *testing.T) { + rts.network.Start(t) + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + rts.numStateStores-1, + "network does not have expected number of nodes") } -func waitForEvidence(t *testing.T, evList types.EvidenceList, suites ...*reactorTestSuite) { +func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2p.NodeID) { t.Helper() - wg := new(sync.WaitGroup) + fn := func(pool *evidence.Pool) { + var ( + localEvList []types.Evidence + size int64 + loops int + ) - for _, suite := range suites { - wg.Add(1) - - go func(s *reactorTestSuite) { - var localEvList []types.Evidence - - currentPoolSize := 0 - for currentPoolSize != len(evList) { - // each evidence should not be more than 500 bytes - localEvList, _ = s.pool.PendingEvidence(int64(len(evList) * 500)) - currentPoolSize = len(localEvList) + // wait till we have at least the amount of evidence + // that we expect. 
if there's more local evidence, then
+ // it doesn't make sense to wait longer, and a
+ // different assertion should catch the resulting error.
+ for len(localEvList) < len(evList) {
+ // each piece of evidence should be no more than 500 bytes
+ localEvList, size = pool.PendingEvidence(int64(len(evList) * 500))
+ if loops == 100 {
+ t.Log("current wait status:", "|",
+ "local", len(localEvList), "|",
+ "waitlist", len(evList), "|",
+ "size", size)
}
- // put the reaped evidence in a map so we can quickly check we got everything
- evMap := make(map[string]types.Evidence)
- for _, e := range localEvList {
- evMap[string(e.Hash())] = e
- }
+ loops++
+ }
- for i, expectedEv := range evList {
- gotEv := evMap[string(expectedEv.Hash())]
- require.Equalf(
- t,
- expectedEv,
- gotEv,
- "evidence at index %d in pool does not match; got: %v, expected: %v", i, gotEv, expectedEv,
- )
- }
+ // put the reaped evidence in a map so we can quickly check we got everything
+ evMap := make(map[string]types.Evidence)
+ for _, e := range localEvList {
+ evMap[string(e.Hash())] = e
+ }
- wg.Done()
- }(suite)
+ for i, expectedEv := range evList {
+ gotEv := evMap[string(expectedEv.Hash())]
+ require.Equalf(
+ t,
+ expectedEv,
+ gotEv,
+ "evidence at index %d in pool does not match; got: %v, expected: %v", i, gotEv, expectedEv,
+ )
+ }
}
- // wait for the evidence in all evidence pools
+ if len(ids) == 1 {
+ // Special case: wait synchronously to avoid the extra
+ // goroutine, so that if this hits a timeout the
+ // resulting stack trace is clearer.
+ fn(rts.pools[ids[0]])
+ return
+ }
+
+ wg := sync.WaitGroup{}
+
+ for id := range rts.pools {
+ if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) {
+ // If an ID list is specified, wait only for the
+ // pools named in that list; otherwise, wait for
+ // all pools.
+ continue
+ }
+
+ wg.Add(1)
+ go func(id p2p.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
+ }
 wg.Wait()
}
+func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) {
+ t.Helper()
+
+ for id, r := range rts.reactors {
+ require.NoError(t, r.Stop(), "stopping reactor #%s", id)
+ r.Wait()
+ require.False(t, r.IsRunning(), "reactor #%s did not stop", id)
+
+ }
+
+ for id, ech := range rts.evidenceChannels {
+ require.Empty(t, ech.Out, "checking channel #%q", id)
+ }
+}
+
func createEvidenceList(
t *testing.T,
pool *evidence.Pool,
val types.PrivValidator,
numEvidence int,
) types.EvidenceList {
+ t.Helper()
+
evList := make([]types.Evidence, numEvidence)
+
for i := 0; i < numEvidence; i++ {
ev := types.NewMockDuplicateVoteEvidenceWithValidator(
uint64(i+1),
@@ -171,44 +225,15 @@ func createEvidenceList(
evidenceChainID,
)
- require.NoError(t, pool.AddEvidence(ev))
+ require.NoError(t, pool.AddEvidence(ev),
+ "adding evidence item #%d of %d to pool with height %d",
+ i, numEvidence, pool.State().LastBlockHeight)
evList[i] = ev
}
return evList
}
-// simulateRouter will increment the provided WaitGroup and execute a simulated
-// router where, for each outbound p2p Envelope from the primary reactor, we
-// proxy (send) the Envelope the relevant peer reactor. Done is invoked on the
-// WaitGroup when numOut Envelopes are sent (i.e. read from the outbound channel).
-func simulateRouter(wg *sync.WaitGroup, primary *reactorTestSuite, suites []*reactorTestSuite, numOut int) { - wg.Add(1) - - // create a mapping for efficient suite lookup by peer ID - suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite) - for _, suite := range suites { - suitesByPeerID[suite.peerID] = suite - } - - // Simulate a router by listening for all outbound envelopes and proxying the - // envelope to the respective peer (suite). - go func() { - for i := 0; i < numOut; i++ { - envelope := <-primary.evidenceOutCh - other := suitesByPeerID[envelope.To] - - other.evidenceInCh <- p2p.Envelope{ - From: primary.peerID, - To: envelope.To, - Message: envelope.Message, - } - } - - wg.Done() - }() -} - func TestReactorMultiDisconnect(t *testing.T) { val := types.NewMockPV() height := uint64(numEvidence) + 10 @@ -216,27 +241,29 @@ func TestReactorMultiDisconnect(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 20) - primary := testSuites[0] - secondary := testSuites[1] + rts := setup(t, []sm.Store{stateDB1, stateDB2}, 20) + primary := rts.nodes[0] + secondary := rts.nodes[1] - _ = createEvidenceList(t, primary.pool, val, numEvidence) + _ = createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } + require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) + rts.start(t) + + require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusUp) // Ensure "disconnecting" the secondary peer from the primary more than once // is handled gracefully. - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary.peerID, - } - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary.peerID, - } + + require.NoError(t, primary.PeerManager.Disconnected(secondary.NodeID)) + require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) + _, err := primary.PeerManager.TryEvictNext() + require.NoError(t, err) + require.NoError(t, primary.PeerManager.Disconnected(secondary.NodeID)) + + require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) + require.Equal(t, secondary.PeerManager.Status(primary.NodeID), p2p.PeerStatusUp) + } // TestReactorBroadcastEvidence creates an environment of multiple peers that @@ -256,44 +283,45 @@ func TestReactorBroadcastEvidence(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - // Create a series of test suites where each suite contains a reactor and + rts := setup(t, stateDBs, 0) + rts.start(t) + + // Create a series of fixtures where each suite contains a reactor and // evidence pool. In addition, we mark a primary suite and the rest are // secondaries where each secondary is added as a peer via a PeerUpdate to the // primary. As a result, the primary will gossip all evidence to each secondary. 
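// For reference, a minimal sketch of the p2ptest harness pattern the suites
// above migrate to, assuming the p2ptest API as it appears in this diff
// (MakeNetwork, MakeChannelsNoCleanup, Start, RandomNode). Illustrative only,
// not part of the patch.
package evidence_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/p2p/p2ptest"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func TestHarnessSketch(t *testing.T) {
	// two in-memory nodes wired over a memory transport
	network := p2ptest.MakeNetwork(t, 2)

	// One evidence channel per node; "NoCleanup" means the test stops the
	// reactors itself and can then assert that the channels drained.
	channels := network.MakeChannelsNoCleanup(
		t, evidence.EvidenceChannel, new(tmproto.EvidenceList), 1)

	// Start dials every node pair; registered PeerUpdates subscribers see a
	// PeerStatusUp update for each connection.
	network.Start(t)
	require.Len(t, network.RandomNode().PeerManager.Peers(), 1)

	_ = channels // a reactor would consume channels[nodeID].In and .Out here
}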
- testSuites := createTestSuites(t, stateDBs, 0) - primary := testSuites[0] - secondaries := testSuites[1:] + primary := rts.network.RandomNode() + secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1) + secondaryIDs := make([]p2p.NodeID, 0, cap(secondaries)) + for id := range rts.network.Nodes { + if id == primary.NodeID { + continue + } - // Simulate a router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). - wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, numEvidence*len(secondaries)) + secondaries = append(secondaries, rts.network.Nodes[id]) + secondaryIDs = append(secondaryIDs, id) + } - evList := createEvidenceList(t, primary.pool, val, numEvidence) + evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. for _, suite := range secondaries { - primary.peerUpdatesCh <- p2p.PeerUpdate{ + rts.peerChans[primary.NodeID] <- p2p.PeerUpdate{ Status: p2p.PeerStatusUp, - NodeID: suite.peerID, + NodeID: suite.NodeID, } } // Wait till all secondary suites (reactor) received all evidence from the // primary suite (node). - waitForEvidence(t, evList, secondaries...) + rts.waitForEvidence(t, evList, secondaryIDs...) - for _, suite := range testSuites { - require.Equal(t, numEvidence, int(suite.pool.Size())) + for _, pool := range rts.pools { + require.Equal(t, numEvidence, int(pool.Size())) } - wg.Wait() - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.evidenceOutCh) - } + rts.assertEvidenceChannelsEmpty(t) } // TestReactorSelectiveBroadcast tests a context where we have two reactors @@ -304,47 +332,35 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { height1 := uint64(numEvidence) + 10 height2 := uint64(numEvidence) / 2 - // stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-10) and - // stateDB2 only has heights 1-7. + // stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and + // stateDB2 only has heights 1-5. stateDB1 := initializeValidatorState(t, val, height1) stateDB2 := initializeValidatorState(t, val, height2) - testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0) - primary := testSuites[0] - secondaries := testSuites[1:] + rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + rts.start(t) - // Simulate a router by listening for all outbound envelopes and proxying the - // envelope to the respective peer (suite). - wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, numEvidence*len(secondaries)) + primary := rts.nodes[0] + secondary := rts.nodes[1] // Send a list of valid evidence to the first reactor's, the one that is ahead, // evidence pool. - evList := createEvidenceList(t, primary.pool, val, numEvidence) + evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. - for _, suite := range secondaries { - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: suite.peerID, - } + rts.peerChans[primary.NodeID] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusUp, + NodeID: secondary.NodeID, } // only ones less than the peers height should make it through - waitForEvidence(t, evList[:height2+2], secondaries...) 
+ rts.waitForEvidence(t, evList[:height2], secondary.NodeID) - require.Equal(t, numEvidence, int(primary.pool.Size())) - require.Equal(t, int(height2+2), int(secondaries[0].pool.Size())) + require.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size())) + require.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size())) - // The primary will continue to send the remaining evidence to the secondaries - // so we wait until it has sent all the envelopes. - wg.Wait() - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.evidenceOutCh) - } + rts.assertEvidenceChannelsEmpty(t) } func TestReactorBroadcastEvidence_Pending(t *testing.T) { @@ -354,46 +370,36 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0) - primary := testSuites[0] - secondary := testSuites[1] + rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + primary := rts.nodes[0] + secondary := rts.nodes[1] - // Simulate a router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). - wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, numEvidence) - - // add all evidence to the primary reactor - evList := createEvidenceList(t, primary.pool, val, numEvidence) + evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) // Manually add half the evidence to the secondary which will mark them as // pending. for i := 0; i < numEvidence/2; i++ { - require.NoError(t, secondary.pool.AddEvidence(evList[i])) + require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i])) } // the secondary should have half the evidence as pending - require.Equal(t, uint32(numEvidence/2), secondary.pool.Size()) + require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size())) - // add the secondary reactor as a peer to the primary reactor - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } + rts.start(t) // The secondary reactor should have received all the evidence ignoring the // already pending evidence. 
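// A sketch of the "pending" bookkeeping the test below relies on: adding
// evidence the pool already holds leaves the pool unchanged, so gossiping
// duplicates to the secondary is harmless. This assumes the evidence.Pool
// behavior exercised in this file (AddEvidence, Size); illustrative only,
// not part of the patch.
package evidence_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/types"
)

func pendingIsIdempotentSketch(t *testing.T, pool *evidence.Pool, ev types.Evidence) {
	require.NoError(t, pool.AddEvidence(ev)) // first add marks the evidence pending
	before := pool.Size()
	require.NoError(t, pool.AddEvidence(ev)) // re-adding pending evidence is a no-op
	require.Equal(t, before, pool.Size())
}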
- waitForEvidence(t, evList, secondary)
+ rts.waitForEvidence(t, evList, secondary.NodeID)
- for _, suite := range testSuites {
- require.Equal(t, numEvidence, int(suite.pool.Size()))
- }
+ // check to make sure that all of the evidence has
+ // propagated
+ require.Len(t, rts.pools, 2)
+ assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(),
+ "primary node should have all the evidence")
+ if assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(),
+ "secondary node should have caught up") {
- wg.Wait()
-
- // ensure all channels are drained
- for _, suite := range testSuites {
- require.Empty(t, suite.evidenceOutCh)
+ rts.assertEvidenceChannelsEmpty(t)
}
}
@@ -404,55 +410,52 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) {
stateDB1 := initializeValidatorState(t, val, height)
stateDB2 := initializeValidatorState(t, val, height)
- testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0)
- primary := testSuites[0]
- secondary := testSuites[1]
+ rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0)
+
+ primary := rts.nodes[0]
+ secondary := rts.nodes[1]
// add all evidence to the primary reactor
- evList := createEvidenceList(t, primary.pool, val, numEvidence)
+ evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)
// Manually add half the evidence to the secondary which will mark them as
// pending.
for i := 0; i < numEvidence/2; i++ {
- require.NoError(t, secondary.pool.AddEvidence(evList[i]))
+ require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i]))
}
// the secondary should have half the evidence as pending
- require.Equal(t, uint32(numEvidence/2), secondary.pool.Size())
+ require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size()))
state, err := stateDB2.Load()
require.NoError(t, err)
// update the secondary's pool such that all pending evidence is committed
state.LastBlockHeight++
- secondary.pool.Update(state, evList[:numEvidence/2])
+ rts.pools[secondary.NodeID].Update(state, evList[:numEvidence/2])
// the secondary should have half the evidence as committed
- require.Equal(t, uint32(0), secondary.pool.Size())
+ require.Equal(t, 0, int(rts.pools[secondary.NodeID].Size()))
- // Simulate a router by listening for all outbound envelopes and proxying the
- // envelopes to the respective peer (suite).
- wg := new(sync.WaitGroup)
- simulateRouter(wg, primary, testSuites, numEvidence)
+ // start the network and ensure it's configured
+ rts.start(t)
- // add the secondary reactor as a peer to the primary reactor
- primary.peerUpdatesCh <- p2p.PeerUpdate{
- Status: p2p.PeerStatusUp,
- NodeID: secondary.peerID,
- }
+ // Without the following sleep the test consistently fails; the sleep
+ // likely forces a context switch that lets the router process other
+ // operations.
+ time.Sleep(2 * time.Millisecond)
// The secondary reactor should have received all the evidence ignoring the
// already committed evidence.
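// The committed-evidence flow used above, reduced to its core: Update
// advances the pool's state and prunes everything in evList from the pending
// set, so Size drops to zero and that evidence is never gossiped again.
// Grounded in the calls this test makes; illustrative only, not part of the
// patch.
package evidence_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/types"
)

func commitEvidenceSketch(t *testing.T, pool *evidence.Pool, evList types.EvidenceList) {
	state := pool.State()
	state.LastBlockHeight++    // pretend the block containing evList was committed
	pool.Update(state, evList) // marks evList committed and prunes it from pending
	require.Equal(t, uint32(0), pool.Size())
}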
- waitForEvidence(t, evList[numEvidence/2:], secondary) + rts.waitForEvidence(t, evList[numEvidence/2:], secondary.NodeID) - require.Equal(t, numEvidence, int(primary.pool.Size())) - require.Equal(t, numEvidence/2, int(secondary.pool.Size())) + require.Len(t, rts.pools, 2) + assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), + "primary node should have all the evidence") + if assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") { - wg.Wait() - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.evidenceOutCh) + rts.assertEvidenceChannelsEmpty(t) } } @@ -470,96 +473,33 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - testSuites := createTestSuites(t, stateDBs, 0) + rts := setup(t, stateDBs, 0) + rts.start(t) - // Simulate a router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). - wg := new(sync.WaitGroup) - for _, suite := range testSuites { - simulateRouter(wg, suite, testSuites, numEvidence*(len(testSuites)-1)) - } - - evList := createEvidenceList(t, testSuites[0].pool, val, numEvidence) + evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) // every suite (reactor) connects to every other suite (reactor) - for _, suiteI := range testSuites { - for _, suiteJ := range testSuites { - if suiteI.peerID != suiteJ.peerID { - suiteI.peerUpdatesCh <- p2p.PeerUpdate{ + for outerID, outerChan := range rts.peerChans { + for innerID := range rts.peerChans { + if outerID != innerID { + outerChan <- p2p.PeerUpdate{ Status: p2p.PeerStatusUp, - NodeID: suiteJ.peerID, + NodeID: innerID, } } } } // wait till all suites (reactors) received all evidence from other suites (reactors) - waitForEvidence(t, evList, testSuites...) + rts.waitForEvidence(t, evList) - for _, suite := range testSuites { - require.Equal(t, numEvidence, int(suite.pool.Size())) + for _, pool := range rts.pools { + require.Equal(t, numEvidence, int(pool.Size())) // commit state so we do not continue to repeat gossiping the same evidence - state := suite.pool.State() + state := pool.State() state.LastBlockHeight++ - suite.pool.Update(state, evList) - } - - wg.Wait() -} - -func TestReactorBroadcastEvidence_RemovePeer(t *testing.T) { - val := types.NewMockPV() - height := uint64(10) - - stateDB1 := initializeValidatorState(t, val, height) - stateDB2 := initializeValidatorState(t, val, height) - - testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, uint(numEvidence)) - primary := testSuites[0] - secondary := testSuites[1] - - // Simulate a router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). 
- wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, numEvidence/2) - - // add all evidence to the primary reactor - evList := createEvidenceList(t, primary.pool, val, numEvidence) - - // add the secondary reactor as a peer to the primary reactor - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } - - // have the secondary reactor receive only half the evidence - waitForEvidence(t, evList[:numEvidence/2], secondary) - - // disconnect the peer - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary.peerID, - } - - // Ensure the secondary only received half of the evidence before being - // disconnected. - require.Equal(t, numEvidence/2, int(secondary.pool.Size())) - - wg.Wait() - - // The primary reactor should still be attempting to send the remaining half. - // - // NOTE: The channel is buffered (size numEvidence) as to ensure the primary - // reactor will send all envelopes at once before receiving the signal to stop - // gossiping. - for i := 0; i < numEvidence/2; i++ { - <-primary.evidenceOutCh - } - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.evidenceOutCh) + pool.Update(state, evList) } } diff --git a/evidence/verify_test.go b/evidence/verify_test.go index a6fd6f06a..e7e55abba 100644 --- a/evidence/verify_test.go +++ b/evidence/verify_test.go @@ -1,6 +1,7 @@ package evidence_test import ( + "context" "testing" "time" @@ -304,11 +305,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) v1 := vote1.ToProto() - err := val.SignVote(chainID, v1) + err := val.SignVote(context.Background(), chainID, v1) require.NoError(t, err) badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) bv := badVote.ToProto() - err = val2.SignVote(chainID, bv) + err = val2.SignVote(context.Background(), chainID, bv) require.NoError(t, err) vote1.Signature = v1.Signature @@ -386,7 +387,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { func makeVote( t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height uint64, round int32, step int, blockID types.BlockID, time time.Time) *types.Vote { - pubKey, err := val.GetPubKey() + pubKey, err := val.GetPubKey(context.Background()) require.NoError(t, err) v := &types.Vote{ ValidatorAddress: pubKey.Address(), @@ -399,7 +400,7 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(chainID, vpb) + err = val.SignVote(context.Background(), chainID, vpb) if err != nil { panic(err) } diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 19ca5b9a1..b43888df6 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -653,7 +653,7 @@ func newRemoteApp( } func checksumIt(data []byte) string { h := sha256.New() - h.Write(data) //nolint: errcheck // ignore errcheck + h.Write(data) return fmt.Sprintf("%x", h.Sum(nil)) } diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 1f1b3be1e..acf9921a4 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -1,8 +1,6 @@ package mempool import ( - "fmt" - "math/rand" "sync" "testing" "time" @@ -15,140 +13,142 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/p2ptest" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" 
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) -var rng = rand.New(rand.NewSource(time.Now().UnixNano())) - type reactorTestSuite struct { - reactor *Reactor + network *p2ptest.Network + logger log.Logger - peerID p2p.NodeID + reactors map[p2p.NodeID]*Reactor + mempoolChnnels map[p2p.NodeID]*p2p.Channel + mempools map[p2p.NodeID]*CListMempool + kvstores map[p2p.NodeID]*kvstore.Application - mempoolChannel *p2p.Channel - mempoolInCh chan p2p.Envelope - mempoolOutCh chan p2p.Envelope - mempoolPeerErrCh chan p2p.PeerError + peerChans map[p2p.NodeID]chan p2p.PeerUpdate + peerUpdates map[p2p.NodeID]*p2p.PeerUpdates - peerUpdatesCh chan p2p.PeerUpdate - peerUpdates *p2p.PeerUpdates + nodes []p2p.NodeID } -func setup(t *testing.T, cfg *cfg.MempoolConfig, logger log.Logger, chBuf uint) *reactorTestSuite { +func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() - pID := make([]byte, 20) - _, err := rng.Read(pID) - require.NoError(t, err) - - peerID, err := p2p.NewNodeID(fmt.Sprintf("%x", pID)) - require.NoError(t, err) - - peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf) - rts := &reactorTestSuite{ - mempoolInCh: make(chan p2p.Envelope, chBuf), - mempoolOutCh: make(chan p2p.Envelope, chBuf), - mempoolPeerErrCh: make(chan p2p.PeerError, chBuf), - peerUpdatesCh: peerUpdatesCh, - peerUpdates: p2p.NewPeerUpdates(peerUpdatesCh), - peerID: peerID, + logger: log.TestingLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(t, numNodes), + reactors: make(map[p2p.NodeID]*Reactor, numNodes), + mempoolChnnels: make(map[p2p.NodeID]*p2p.Channel, numNodes), + mempools: make(map[p2p.NodeID]*CListMempool, numNodes), + kvstores: make(map[p2p.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes), } - rts.mempoolChannel = p2p.NewChannel( - MempoolChannel, - new(protomem.Message), - rts.mempoolInCh, - rts.mempoolOutCh, - rts.mempoolPeerErrCh, - ) + rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, MempoolChannel, new(protomem.Message), int(chBuf)) - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mempool, memCleanup := newMempoolWithApp(cc) + i := 0 + for nodeID := range rts.network.Nodes { + rts.kvstores[nodeID] = kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(rts.kvstores[nodeID]) - mempool.SetLogger(logger) + mempool, memCleanup := newMempoolWithApp(cc) + t.Cleanup(memCleanup) + mempool.SetLogger(rts.logger) + rts.mempools[nodeID] = mempool - rts.reactor = NewReactor( - logger, - cfg, - nil, - mempool, - rts.mempoolChannel, - rts.peerUpdates, - ) + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID]) + rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - require.NoError(t, rts.reactor.Start()) - require.True(t, rts.reactor.IsRunning()) + rts.reactors[nodeID] = NewReactor( + rts.logger.With("nodeID", nodeID), + cfg, + rts.network.RandomNode().PeerManager, + mempool, + rts.mempoolChnnels[nodeID], + rts.peerUpdates[nodeID], + ) + + rts.nodes = append(rts.nodes, nodeID) + + require.NoError(t, rts.reactors[nodeID].Start()) + require.True(t, rts.reactors[nodeID].IsRunning()) + i++ + } + + require.Len(t, rts.reactors, numNodes) t.Cleanup(func() { - memCleanup() - require.NoError(t, rts.reactor.Stop()) - require.False(t, rts.reactor.IsRunning()) + for nodeID := range 
rts.reactors { + if rts.reactors[nodeID].IsRunning() { + require.NoError(t, rts.reactors[nodeID].Stop()) + require.False(t, rts.reactors[nodeID].IsRunning()) + } + } }) return rts } -func simulateRouter( - wg *sync.WaitGroup, - primary *reactorTestSuite, - suites []*reactorTestSuite, - numOut int, -) { - - wg.Add(1) - - // create a mapping for efficient suite lookup by peer ID - suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite) - for _, suite := range suites { - suitesByPeerID[suite.peerID] = suite - } - - // Simulate a router by listening for all outbound envelopes and proxying the - // envelope to the respective peer (suite). - go func() { - for i := 0; i < numOut; i++ { - envelope := <-primary.mempoolOutCh - other := suitesByPeerID[envelope.To] - - other.mempoolInCh <- p2p.Envelope{ - From: primary.peerID, - To: envelope.To, - Message: envelope.Message, - } - } - - wg.Done() - }() +func (rts *reactorTestSuite) start(t *testing.T) { + t.Helper() + rts.network.Start(t) + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + len(rts.nodes)-1, + "network does not have expected number of nodes") } -func waitForTxs(t *testing.T, txs types.Txs, suites ...*reactorTestSuite) { +func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { t.Helper() - wg := new(sync.WaitGroup) + for id, r := range rts.reactors { + require.NoError(t, r.Stop(), "stopping reactor %s", id) + r.Wait() + require.False(t, r.IsRunning(), "reactor %s did not stop", id) + } + + for id, mch := range rts.mempoolChnnels { + require.Empty(t, mch.Out, "checking channel %q", id) + } +} + +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...p2p.NodeID) { + t.Helper() + + fn := func(pool *CListMempool) { + for pool.Size() < len(txs) { + time.Sleep(50 * time.Millisecond) + } + + reapedTxs := pool.ReapMaxTxs(len(txs)) + require.Equal(t, len(txs), len(reapedTxs)) + for i, tx := range txs { + require.Equalf(t, + tx, + reapedTxs[i], + "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], + ) + } + } + + if len(ids) == 1 { + fn(rts.reactors[ids[0]].mempool) + return + } + + wg := &sync.WaitGroup{} + for id := range rts.mempools { + if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) { + continue + } - for _, suite := range suites { wg.Add(1) - - go func(s *reactorTestSuite) { - mempool := s.reactor.mempool - for mempool.Size() < len(txs) { - time.Sleep(time.Millisecond * 100) - } - - reapedTxs := mempool.ReapMaxTxs(len(txs)) - for i, tx := range txs { - require.Equalf( - t, tx, reapedTxs[i], - "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], - ) - } - - wg.Done() - }(suite) + func(nid p2p.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) } wg.Wait() @@ -159,54 +159,25 @@ func TestReactorBroadcastTxs(t *testing.T) { numNodes := 10 config := cfg.TestConfig() - testSuites := make([]*reactorTestSuite, numNodes) - for i := 0; i < len(testSuites); i++ { - logger := log.TestingLogger().With("node", i) - testSuites[i] = setup(t, config.Mempool, logger, 0) - } + rts := setup(t, config.Mempool, numNodes, 0) - // ignore all peer errors - for _, suite := range testSuites { - go func(s *reactorTestSuite) { - // drop all errors on the mempool channel - for range s.mempoolPeerErrCh { - } - }(suite) - } + primary := rts.nodes[0] + secondaries := rts.nodes[1:] - primary := testSuites[0] - secondaries := testSuites[1:] + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) - // Simulate a 
router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). - wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, numTxs*len(secondaries)) - - txs := checkTxs(t, primary.reactor.mempool, numTxs, UnknownPeerID) - - // Add each secondary suite (node) as a peer to the primary suite (node). This - // will cause the primary to gossip all mempool txs to the secondaries. - for _, suite := range secondaries { - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: suite.peerID, - } - } + // run the router + rts.start(t) // Wait till all secondary suites (reactor) received all mempool txs from the // primary suite (node). - waitForTxs(t, txs, secondaries...) + rts.waitForTxns(t, txs, secondaries...) - for _, suite := range testSuites { - require.Equal(t, len(txs), suite.reactor.mempool.Size()) + for _, pool := range rts.mempools { + require.Equal(t, len(txs), pool.Size()) } - wg.Wait() - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.mempoolOutCh) - } + rts.assertMempoolChannelsDrained(t) } // regression test for https://github.com/tendermint/tendermint/issues/5408 @@ -215,14 +186,12 @@ func TestReactorConcurrency(t *testing.T) { numNodes := 2 config := cfg.TestConfig() - testSuites := make([]*reactorTestSuite, numNodes) - for i := 0; i < len(testSuites); i++ { - logger := log.TestingLogger().With("node", i) - testSuites[i] = setup(t, config.Mempool, logger, 0) - } + rts := setup(t, config.Mempool, numNodes, 0) - primary := testSuites[0] - secondary := testSuites[1] + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(t) var wg sync.WaitGroup @@ -231,37 +200,41 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update the whole mempool - txs := checkTxs(t, primary.reactor.mempool, numTxs, UnknownPeerID) + + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) go func() { defer wg.Done() - primary.reactor.mempool.Lock() - defer primary.reactor.mempool.Unlock() + mempool := rts.mempools[primary] + + mempool.Lock() + defer mempool.Unlock() deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) for i := range txs { deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} } - err := primary.reactor.mempool.Update(1, txs, deliverTxResponses, nil, nil) - require.NoError(t, err) + require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil)) }() // 1. submit a bunch of txs // 2. 
update none - _ = checkTxs(t, secondary.reactor.mempool, numTxs, UnknownPeerID) + _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID) go func() { defer wg.Done() - secondary.reactor.mempool.Lock() - defer secondary.reactor.mempool.Unlock() + mempool := rts.mempools[secondary] - err := secondary.reactor.mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + mempool.Lock() + defer mempool.Unlock() + + err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) require.NoError(t, err) }() // flush the mempool - secondary.reactor.mempool.Flush() + rts.mempools[secondary].Flush() } wg.Wait() @@ -272,42 +245,23 @@ func TestReactorNoBroadcastToSender(t *testing.T) { numNodes := 2 config := cfg.TestConfig() - testSuites := make([]*reactorTestSuite, numNodes) - for i := 0; i < len(testSuites); i++ { - logger := log.TestingLogger().With("node", i) - testSuites[i] = setup(t, config.Mempool, logger, uint(numTxs)) - } + rts := setup(t, config.Mempool, numNodes, uint(numTxs)) - primary := testSuites[0] - secondary := testSuites[1] - - // ignore all peer errors - for _, suite := range testSuites { - go func(s *reactorTestSuite) { - // drop all errors on the mempool channel - for range s.mempoolPeerErrCh { - } - }(suite) - } + primary := rts.nodes[0] + secondary := rts.nodes[1] peerID := uint16(1) - _ = checkTxs(t, primary.reactor.mempool, numTxs, peerID) + _ = checkTxs(t, rts.mempools[primary], numTxs, peerID) - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } + rts.start(t) time.Sleep(100 * time.Millisecond) require.Eventually(t, func() bool { - return secondary.reactor.mempool.Size() == 0 + return rts.mempools[secondary].Size() == 0 }, time.Minute, 100*time.Millisecond) - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.mempoolOutCh) - } + rts.assertMempoolChannelsDrained(t) } func TestMempoolIDsBasic(t *testing.T) { @@ -329,86 +283,54 @@ func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 config := cfg.TestConfig() - testSuites := make([]*reactorTestSuite, numNodes) - for i := 0; i < len(testSuites); i++ { - logger := log.TestingLogger().With("node", i) - testSuites[i] = setup(t, config.Mempool, logger, 0) - } + rts := setup(t, config.Mempool, numNodes, 0) - // ignore all peer errors - for _, suite := range testSuites { - go func(s *reactorTestSuite) { - // drop all errors on the mempool channel - for range s.mempoolPeerErrCh { - } - }(suite) - } - - primary := testSuites[0] - secondary := testSuites[1] - - // Simulate a router by listening for all outbound envelopes and proxying the - // envelopes to the respective peer (suite). - wg := new(sync.WaitGroup) - simulateRouter(wg, primary, testSuites, 1) + primary := rts.nodes[0] + secondary := rts.nodes[1] // Broadcast a tx, which has the max size and ensure it's received by the // second reactor. tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes) - err := primary.reactor.mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID}) + err := rts.reactors[primary].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID}) require.NoError(t, err) - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } + rts.start(t) // Wait till all secondary suites (reactor) received all mempool txs from the // primary suite (node). 
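// The wait below reduces to this poll-and-reap loop, shown standalone for
// clarity. It assumes the CListMempool API used throughout this file
// (Size, ReapMaxTxs); illustrative only, not part of the patch.
package mempool

import (
	"time"

	"github.com/tendermint/tendermint/types"
)

func waitForSizeSketch(pool *CListMempool, txs types.Txs) types.Txs {
	for pool.Size() < len(txs) {
		// txs arrive via gossip from the primary, so simply poll
		time.Sleep(50 * time.Millisecond)
	}
	return pool.ReapMaxTxs(len(txs)) // reaped in insertion order for comparison
}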
- waitForTxs(t, []types.Tx{tx1}, secondary) + rts.waitForTxns(t, []types.Tx{tx1}, secondary) - primary.reactor.mempool.Flush() - secondary.reactor.mempool.Flush() + rts.reactors[primary].mempool.Flush() + rts.reactors[secondary].mempool.Flush() // broadcast a tx, which is beyond the max size and ensure it's not sent tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1) - err = primary.reactor.mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID}) + err = rts.mempools[primary].CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID}) require.Error(t, err) - wg.Wait() - - // ensure all channels are drained - for _, suite := range testSuites { - require.Empty(t, suite.mempoolOutCh) - } + rts.assertMempoolChannelsDrained(t) } func TestDontExhaustMaxActiveIDs(t *testing.T) { config := cfg.TestConfig() - reactor := setup(t, config.Mempool, log.TestingLogger().With("node", 0), 0) - go func() { - // drop all messages on the mempool channel - for range reactor.mempoolOutCh { - } - }() + // we're creating a single node network, but not starting the + // network. + rts := setup(t, config.Mempool, 1, 0) - go func() { - // drop all errors on the mempool channel - for range reactor.mempoolPeerErrCh { - } - }() + nodeID := rts.nodes[0] peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) // ensure the reactor does not panic (i.e. exhaust active IDs) for i := 0; i < maxActiveIDs+1; i++ { - reactor.peerUpdatesCh <- p2p.PeerUpdate{ + rts.peerChans[nodeID] <- p2p.PeerUpdate{ Status: p2p.PeerStatusUp, NodeID: peerID, } - reactor.mempoolOutCh <- p2p.Envelope{ + + rts.mempoolChnnels[nodeID].Out <- p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{}, @@ -416,7 +338,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { } } - require.Empty(t, reactor.mempoolOutCh) + rts.assertMempoolChannelsDrained(t) } func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { @@ -446,32 +368,16 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { config := cfg.TestConfig() - testSuites := []*reactorTestSuite{ - setup(t, config.Mempool, log.TestingLogger().With("node", 0), 0), - setup(t, config.Mempool, log.TestingLogger().With("node", 1), 0), - } + rts := setup(t, config.Mempool, 2, 0) - primary := testSuites[0] - secondary := testSuites[1] + primary := rts.nodes[0] + secondary := rts.nodes[1] - // ignore all peer errors - for _, suite := range testSuites { - go func(s *reactorTestSuite) { - // drop all errors on the mempool channel - for range s.mempoolPeerErrCh { - } - }(suite) - } - - // connect peer - primary.peerUpdatesCh <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: secondary.peerID, - } + rts.start(t) // disconnect peer - primary.peerUpdatesCh <- p2p.PeerUpdate{ + rts.peerChans[primary] <- p2p.PeerUpdate{ Status: p2p.PeerStatusDown, - NodeID: secondary.peerID, + NodeID: secondary, } } diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index ae9320757..4c7132865 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -124,7 +124,7 @@ Restart=on-failure User={{service}} Group={{service}} PermissionsStartOnly=true -ExecStart=/usr/bin/tendermint node --proxy-app=kvstore --p2p.persistent-peers=$id0@$ip0:26656,$id1@$ip1:26656,$id2@$ip2:26656,$id3@$ip3:26656 +ExecStart=/usr/bin/tendermint node --mode validator --proxy-app=kvstore --p2p.persistent-peers=$id0@$ip0:26656,$id1@$ip1:26656,$id2@$ip2:26656,$id3@$ip3:26656 ExecReload=/bin/kill -HUP \$MAINPID KillSignal=SIGTERM diff --git 
a/node/node.go b/node/node.go index 69e92d4a4..4f7901e3e 100644 --- a/node/node.go +++ b/node/node.go @@ -100,12 +100,23 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) } - - pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - if err != nil { - return nil, err + if config.Mode == cfg.ModeSeed { + return NewSeedNode(config, + nodeKey, + DefaultGenesisDocProviderFunc(config), + logger, + ) } + var pval *privval.FilePV + if config.Mode == cfg.ModeValidator { + pval, err = privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + if err != nil { + return nil, err + } + } else { + pval = nil + } return NewNode(config, pval, nodeKey, @@ -298,12 +309,13 @@ func doHandshake( return nil } -func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) { // Log the version info. logger.Info("Version info", "software", version.TMCoreSemVer, "block", version.BlockProtocol, "p2p", version.P2PProtocol, + "mode", mode, ) // If the state and software differ in block version, at least log it. @@ -313,13 +325,18 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL "state", state.Version.Consensus.Block, ) } - - addr := pubKey.Address() - // Log whether this node is a validator or an observer - if state.Validators.HasAddress(addr) { - consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey) - } else { - consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) + switch { + case mode == cfg.ModeFull: + consensusLogger.Info("This node is a fullnode") + case mode == cfg.ModeValidator: + addr := pubKey.Address() + // Log whether this node is a validator or an observer + if state.Validators.HasAddress(addr) { + consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes()) + } else { + consensusLogger.Info("This node is a validator (NOT in the active validator set)", + "addr", addr, "pubKey", pubKey.Bytes()) + } } } @@ -328,7 +345,7 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { return false } addr, _ := state.Validators.GetByIndex(0) - return bytes.Equal(pubKey.Address(), addr) + return pubKey != nil && bytes.Equal(pubKey.Address(), addr) } func createMempoolReactor( @@ -445,7 +462,7 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") switch config.FastSync.Version { - case "v0": + case cfg.BlockchainV0: reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) var ( @@ -471,7 +488,7 @@ func createBlockchainReactor( return reactorShim, reactor, nil - case "v2": + case cfg.BlockchainV2: reactor := bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) reactor.SetLogger(logger) @@ -507,10 +524,8 @@ func createConsensusReactor( evidencePool, cs.StateMetrics(csMetrics), ) - consensusState.SetLogger(logger) - - if privValidator != nil { + if privValidator != nil && config.Mode == cfg.ModeValidator { consensusState.SetPrivValidator(privValidator) } @@ -677,11 +692,13 @@ func createSwitch(config *cfg.Config, ) sw.SetLogger(p2pLogger) - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) 
- sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) + if config.Mode != cfg.ModeSeed { + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("BLOCKCHAIN", bcReactor) + sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) + } sw.SetNodeInfo(nodeInfo) sw.SetNodeKey(nodeKey) @@ -723,19 +740,19 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, sw *p2p.Switch, logger log.Logger) *pex.Reactor { + reactorConfig := &pex.ReactorConfig{ + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), + SeedMode: config.Mode == cfg.ModeSeed, + // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 + // blocks assuming 10s blocks ~ 28 hours. + // TODO (melekes): make it dynamic based on the actual block latencies + // from the live network. + // https://github.com/tendermint/tendermint/issues/3523 + SeedDisconnectWaitPeriod: 28 * time.Hour, + PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, + } // TODO persistent peers ? so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, - &pex.ReactorConfig{ - Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), - SeedMode: config.P2P.SeedMode, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, - }) + pexReactor := pex.NewReactor(addrBook, reactorConfig) pexReactor.SetLogger(logger.With("module", "pex")) sw.AddReactor("PEX", pexReactor) return pexReactor @@ -813,6 +830,100 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto return nil } +// NewSeedNode returns a new seed node, containing only p2p, pex reactor +func NewSeedNode(config *cfg.Config, + nodeKey p2p.NodeKey, + genesisDocProvider GenesisDocProvider, + logger log.Logger, + options ...Option) (*Node, error) { + + genDoc, err := genesisDocProvider() + if err != nil { + return nil, err + } + + state, err := sm.MakeGenesisState(genDoc) + if err != nil { + return nil, err + } + + nodeInfo, err := makeSeedNodeInfo(config, nodeKey, genDoc, state) + if err != nil { + return nil, err + } + + // Setup Transport and Switch. 
+ p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID) + p2pLogger := logger.With("module", "p2p") + transport := createTransport(p2pLogger, config) + sw := createSwitch( + config, transport, p2pMetrics, nil, nil, + nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, + ) + + err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + } + + err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } + + addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } + + peerManager, err := createPeerManager(config, p2pLogger, nodeKey.ID) + if err != nil { + return nil, fmt.Errorf("failed to create peer manager: %w", err) + } + + router, err := createRouter(p2pLogger, nodeInfo, nodeKey.PrivKey, peerManager, transport) + if err != nil { + return nil, fmt.Errorf("failed to create router: %w", err) + } + + // start the pex reactor + pexReactor := createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + pexReactorV2, err := createPEXReactorV2(config, logger, peerManager, router) + if err != nil { + return nil, err + } + + if config.RPC.PprofListenAddress != "" { + go func() { + logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) + }() + } + + node := &Node{ + config: config, + genesisDoc: genDoc, + + transport: transport, + sw: sw, + addrBook: addrBook, + nodeInfo: nodeInfo, + nodeKey: nodeKey, + peerManager: peerManager, + router: router, + + pexReactor: pexReactor, + pexReactorV2: pexReactorV2, + } + node.BaseService = *service.NewBaseService(logger, "SeedNode", node) + + for _, option := range options { + option(node) + } + + return node, nil +} + // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, @@ -875,10 +986,15 @@ func NewNode(config *cfg.Config, } } } - - pubKey, err := privValidator.GetPubKey() - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + var pubKey crypto.PubKey + if config.Mode == cfg.ModeValidator { + pubKey, err = privValidator.GetPubKey(context.TODO()) + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + if pubKey == nil { + return nil, errors.New("could not retrieve public key from private validator") + } } // Determine whether we should attempt state sync. @@ -909,7 +1025,7 @@ func NewNode(config *cfg.Config, // app may modify the validator set, specifying ourself as the only validator. fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) - logNodeStartupInfo(state, pubKey, logger, consensusLogger) + logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. // TODO: Use a persistent peer database. @@ -989,13 +1105,16 @@ func NewNode(config *cfg.Config, // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, // we should clean this whole thing up. 
See: // https://github.com/tendermint/tendermint/issues/4644 - stateSyncReactorShim := p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - var ( + stateSyncReactor *statesync.Reactor + stateSyncReactorShim *p2p.ReactorShim + channels map[p2p.ChannelID]*p2p.Channel peerUpdates *p2p.PeerUpdates ) + stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) + if useLegacyP2P { channels = getChannelsFromShim(stateSyncReactorShim) peerUpdates = stateSyncReactorShim.PeerUpdates @@ -1004,7 +1123,7 @@ func NewNode(config *cfg.Config, peerUpdates = peerManager.Subscribe() } - stateSyncReactor := statesync.NewReactor( + stateSyncReactor = statesync.NewReactor( stateSyncReactorShim.Logger, proxyApp.Snapshot(), proxyApp.Query(), @@ -1121,7 +1240,7 @@ func (n *Node) OnStart() error { // Start the RPC server before the P2P server // so we can eg. receive txs for the first block - if n.config.RPC.ListenAddress != "" { + if n.config.RPC.ListenAddress != "" && n.config.Mode != cfg.ModeSeed { listeners, err := n.startRPC() if err != nil { return err @@ -1164,31 +1283,33 @@ func (n *Node) OnStart() error { return err } - if n.config.FastSync.Version == "v0" { - // Start the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Start(); err != nil { + if n.config.Mode != cfg.ModeSeed { + if n.config.FastSync.Version == cfg.BlockchainV0 { + // Start the real blockchain reactor separately since the switch uses the shim. + if err := n.bcReactor.Start(); err != nil { + return err + } + } + + // Start the real consensus reactor separately since the switch uses the shim. + if err := n.consensusReactor.Start(); err != nil { return err } - } - // Start the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Start(); err != nil { - return err - } + // Start the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Start(); err != nil { + return err + } - // Start the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Start(); err != nil { - return err - } + // Start the real mempool reactor separately since the switch uses the shim. + if err := n.mempoolReactor.Start(); err != nil { + return err + } - // Start the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Start(); err != nil { - return err - } - - // Start the real evidence reactor separately since the switch uses the shim. - if err := n.evidenceReactor.Start(); err != nil { - return err + // Start the real evidence reactor separately since the switch uses the shim. + if err := n.evidenceReactor.Start(); err != nil { + return err + } } if !useLegacyP2P && n.pexReactorV2 != nil { @@ -1233,32 +1354,35 @@ func (n *Node) OnStop() { n.Logger.Error("Error closing indexerService", "err", err) } - // now stop the reactors - if n.config.FastSync.Version == "v0" { - // Stop the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the blockchain reactor", "err", err) + if n.config.Mode != cfg.ModeSeed { + + // now stop the reactors + if n.config.FastSync.Version == "v0" { + // Stop the real blockchain reactor separately since the switch uses the shim. 
+ if err := n.bcReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the blockchain reactor", "err", err) + } } - } - // Stop the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the consensus reactor", "err", err) - } + // Stop the real consensus reactor separately since the switch uses the shim. + if err := n.consensusReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the consensus reactor", "err", err) + } - // Stop the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the state sync reactor", "err", err) - } + // Stop the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the state sync reactor", "err", err) + } - // Stop the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the mempool reactor", "err", err) - } + // Stop the real mempool reactor separately since the switch uses the shim. + if err := n.mempoolReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the mempool reactor", "err", err) + } - // Stop the real evidence reactor separately since the switch uses the shim. - if err := n.evidenceReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the evidence reactor", "err", err) + // Stop the real evidence reactor separately since the switch uses the shim. + if err := n.evidenceReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the evidence reactor", "err", err) + } } if !useLegacyP2P && n.pexReactorV2 != nil { @@ -1312,11 +1436,7 @@ func (n *Node) OnStop() { // ConfigureRPC makes sure RPC has all the objects it needs to operate. 
func (n *Node) ConfigureRPC() error { - pubKey, err := n.privValidator.GetPubKey() - if err != nil { - return fmt.Errorf("can't get pubkey: %w", err) - } - rpccore.SetEnvironment(&rpccore.Environment{ + rpcCoreEnv := rpccore.Environment{ ProxyAppQuery: n.proxyApp.Query(), ProxyAppMempool: n.proxyApp.Mempool(), @@ -1327,7 +1447,6 @@ func (n *Node) ConfigureRPC() error { P2PPeers: n.sw, P2PTransport: n, - PubKey: pubKey, GenDoc: n.genesisDoc, TxIndexer: n.txIndexer, ConsensusReactor: n.consensusReactor, @@ -1337,7 +1456,15 @@ func (n *Node) ConfigureRPC() error { Logger: n.Logger.With("module", "rpc"), Config: *n.config.RPC, - }) + } + if n.config.Mode == cfg.ModeValidator { + pubKey, err := n.privValidator.GetPubKey(context.TODO()) + if pubKey == nil || err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + rpcCoreEnv.PubKey = pubKey + } + rpccore.SetEnvironment(&rpcCoreEnv) return nil } @@ -1577,10 +1704,10 @@ func makeNodeInfo( var bcChannel byte switch config.FastSync.Version { - case "v0": + case cfg.BlockchainV0: bcChannel = byte(bcv0.BlockchainChannel) - case "v2": + case cfg.BlockchainV2: bcChannel = bcv2.BlockchainChannel default: @@ -1630,6 +1757,45 @@ func makeNodeInfo( return nodeInfo, err } +func makeSeedNodeInfo( + config *cfg.Config, + nodeKey p2p.NodeKey, + genDoc *types.GenesisDoc, + state sm.State, +) (p2p.NodeInfo, error) { + nodeInfo := p2p.NodeInfo{ + ProtocolVersion: p2p.NewProtocolVersion( + version.P2PProtocol, // global + state.Version.Consensus.Block, + state.Version.Consensus.App, + ), + NodeID: nodeKey.ID, + Network: genDoc.ChainID, + Version: version.TMCoreSemVer, + Channels: []byte{}, + Moniker: config.Moniker, + Other: p2p.NodeInfoOther{ + TxIndex: "off", + RPCAddress: config.RPC.ListenAddress, + }, + } + + if config.P2P.PexReactor { + nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) + } + + lAddr := config.P2P.ExternalAddress + + if lAddr == "" { + lAddr = config.P2P.ListenAddress + } + + nodeInfo.ListenAddr = lAddr + + err := nodeInfo.Validate() + return nodeInfo, err +} + //------------------------------------------------------------------------------ var ( @@ -1716,7 +1882,7 @@ func createAndStartPrivValidatorSocketClient( } // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey() + _, err = pvsc.GetPubKey(context.TODO()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -1741,7 +1907,7 @@ func createAndStartPrivValidatorGRPCClient( } // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey() + _, err = pvsc.GetPubKey(context.TODO()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } diff --git a/node/node_test.go b/node/node_test.go index 420e5f58c..01ce60abb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -522,6 +522,27 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { assert.Equal(t, customBlockchainReactor, n.Switch().Reactor("BLOCKCHAIN")) } +func TestNodeNewSeedNode(t *testing.T) { + config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") + config.Mode = cfg.ModeSeed + defer os.RemoveAll(config.RootDir) + + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + require.NoError(t, err) + + n, err := NewSeedNode(config, + nodeKey, + DefaultGenesisDocProviderFunc(config), + log.TestingLogger(), + ) + require.NoError(t, err) + + err = n.Start() + require.NoError(t, err) + + assert.True(t, n.pexReactor.IsRunning()) +} + func state(nVals int, height uint64) (sm.State, dbm.DB, []types.PrivValidator) { privVals 
:= make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) diff --git a/p2p/p2ptest/network.go b/p2p/p2ptest/network.go index 3e94c7bc1..30e508287 100644 --- a/p2p/p2ptest/network.go +++ b/p2p/p2ptest/network.go @@ -211,7 +211,9 @@ func MakeNode(t *testing.T, network *Network) *Node { require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint") peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - MinRetryTime: 10 * time.Millisecond, + MinRetryTime: 10 * time.Millisecond, + MaxRetryTime: 100 * time.Millisecond, + RetryTimeJitter: time.Millisecond, }) require.NoError(t, err) @@ -274,5 +276,6 @@ func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { RequireNoUpdates(t, sub) sub.Close() }) + return sub } diff --git a/p2p/p2ptest/util.go b/p2p/p2ptest/util.go index 27335d9d3..f29ddeb79 100644 --- a/p2p/p2ptest/util.go +++ b/p2p/p2ptest/util.go @@ -2,7 +2,17 @@ package p2ptest import ( gogotypes "github.com/gogo/protobuf/types" + "github.com/tendermint/tendermint/p2p" ) // Message is a simple message containing a string-typed Value field. type Message = gogotypes.StringValue + +func NodeInSlice(id p2p.NodeID, ids []p2p.NodeID) bool { + for _, n := range ids { + if id == n { + return true + } + } + return false +} diff --git a/p2p/peermanager.go b/p2p/peermanager.go index f330dd236..4a77b6209 100644 --- a/p2p/peermanager.go +++ b/p2p/peermanager.go @@ -792,6 +792,19 @@ func (m *PeerManager) Subscribe() *PeerUpdates { // compounding. Limiting it to 1 means that the subscribers are still // reasonably in sync. However, this should probably be benchmarked. peerUpdates := NewPeerUpdates(make(chan PeerUpdate, 1)) + m.Register(peerUpdates) + return peerUpdates +} + +// Register allows you to inject a custom PeerUpdate instance into the +// PeerManager, rather than relying on the instance constructed by the +// Subscribe method, which wraps the functionality of the Register +// method. +// +// The caller must consume the peer updates from this PeerUpdates +// instance in a timely fashion and close the subscription when done, +// otherwise the PeerManager will halt. +func (m *PeerManager) Register(peerUpdates *PeerUpdates) { m.mtx.Lock() m.subscriptions[peerUpdates] = peerUpdates m.mtx.Unlock() @@ -805,7 +818,6 @@ func (m *PeerManager) Subscribe() *PeerUpdates { case <-m.closeCh: } }() - return peerUpdates } // broadcast broadcasts a peer update to all subscriptions. The caller must diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 49daec014..f9faabc09 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -941,6 +941,6 @@ func (a *addrBook) hash(b []byte) ([]byte, error) { if err != nil { return nil, err } - hasher.Write(b) //nolint:errcheck // ignore error + hasher.Write(b) return hasher.Sum(nil), nil } diff --git a/privval/file.go b/privval/file.go index bf0b28824..a4e6bf53b 100644 --- a/privval/file.go +++ b/privval/file.go @@ -2,6 +2,7 @@ package privval import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -153,6 +154,8 @@ type FilePV struct { LastSignState FilePVLastSignState } +var _ types.PrivValidator = (*FilePV)(nil) + // NewFilePV generates a new validator from the given key and paths. func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FilePV { return &FilePV{ @@ -257,13 +260,13 @@ func (pv *FilePV) GetAddress() types.Address { // GetPubKey returns the public key of the validator. // Implements PrivValidator. 
-func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { +func (pv *FilePV) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return pv.Key.PubKey, nil } // SignVote signs a canonical representation of the vote, along with the // chainID. Implements PrivValidator. -func (pv *FilePV) SignVote(chainID string, vote *tmproto.Vote) error { +func (pv *FilePV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { if err := pv.signVote(chainID, vote); err != nil { return fmt.Errorf("error signing vote: %v", err) } @@ -272,7 +275,7 @@ func (pv *FilePV) SignVote(chainID string, vote *tmproto.Vote) error { // SignProposal signs a canonical representation of the proposal, along with // the chainID. Implements PrivValidator. -func (pv *FilePV) SignProposal(chainID string, proposal *tmproto.Proposal) error { +func (pv *FilePV) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { if err := pv.signProposal(chainID, proposal); err != nil { return fmt.Errorf("error signing proposal: %v", err) } diff --git a/privval/file_test.go b/privval/file_test.go index b82f365c9..770e16c86 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -1,6 +1,7 @@ package privval import ( + "context" "encoding/base64" "fmt" "io/ioutil" @@ -61,7 +62,7 @@ func TestResetValidator(t *testing.T) { randBytes := tmrand.Bytes(tmhash.Size) blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) - err = privVal.SignVote("mychainid", vote.ToProto()) + err = privVal.SignVote(context.Background(), "mychainid", vote.ToProto()) assert.NoError(t, err, "expected no error signing vote") // priv val after signing is not same as empty @@ -186,11 +187,11 @@ func TestSignVote(t *testing.T) { // sign a vote for first time vote := newVote(privVal.Key.Address, 0, height, round, voteType, block1) v := vote.ToProto() - err = privVal.SignVote("mychainid", v) + err = privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(err, "expected no error signing vote") // try to sign the same vote again; should be fine - err = privVal.SignVote("mychainid", v) + err = privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(err, "expected no error on signing same vote") // now try some bad votes @@ -203,14 +204,14 @@ func TestSignVote(t *testing.T) { for _, c := range cases { cpb := c.ToProto() - err = privVal.SignVote("mychainid", cpb) + err = privVal.SignVote(context.Background(), "mychainid", cpb) assert.Error(err, "expected error on signing conflicting vote") } // try signing a vote with a different time stamp sig := vote.Signature vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) - err = privVal.SignVote("mychainid", v) + err = privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(err) assert.Equal(sig, vote.Signature) } @@ -238,11 +239,11 @@ func TestSignProposal(t *testing.T) { // sign a proposal for first time proposal := newProposal(height, round, block1) pbp := proposal.ToProto() - err = privVal.SignProposal("mychainid", pbp) + err = privVal.SignProposal(context.Background(), "mychainid", pbp) assert.NoError(err, "expected no error signing proposal") // try to sign the same proposal again; should be fine - err = privVal.SignProposal("mychainid", pbp) + err = privVal.SignProposal(context.Background(), "mychainid", pbp) assert.NoError(err, "expected no error on signing same proposal") // now try some bad Proposals @@ -254,14 +255,14 @@ func 
TestSignProposal(t *testing.T) { } for _, c := range cases { - err = privVal.SignProposal("mychainid", c.ToProto()) + err = privVal.SignProposal(context.Background(), "mychainid", c.ToProto()) assert.Error(err, "expected error on signing conflicting proposal") } // try signing a proposal with a different time stamp sig := proposal.Signature proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - err = privVal.SignProposal("mychainid", pbp) + err = privVal.SignProposal(context.Background(), "mychainid", pbp) assert.NoError(err) assert.Equal(sig, proposal.Signature) } @@ -283,7 +284,7 @@ func TestDifferByTimestamp(t *testing.T) { { proposal := newProposal(height, round, block1) pb := proposal.ToProto() - err := privVal.SignProposal(chainID, pb) + err := privVal.SignProposal(context.Background(), chainID, pb) assert.NoError(t, err, "expected no error signing proposal") signBytes := types.ProposalSignBytes(chainID, pb) @@ -294,7 +295,7 @@ func TestDifferByTimestamp(t *testing.T) { pb.Timestamp = pb.Timestamp.Add(time.Millisecond) var emptySig []byte proposal.Signature = emptySig - err = privVal.SignProposal("mychainid", pb) + err = privVal.SignProposal(context.Background(), "mychainid", pb) assert.NoError(t, err, "expected no error on signing same proposal") assert.Equal(t, timeStamp, pb.Timestamp) @@ -308,7 +309,7 @@ func TestDifferByTimestamp(t *testing.T) { blockID := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) v := vote.ToProto() - err := privVal.SignVote("mychainid", v) + err := privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(t, err, "expected no error signing vote") signBytes := types.VoteSignBytes(chainID, v) @@ -319,7 +320,7 @@ func TestDifferByTimestamp(t *testing.T) { v.Timestamp = v.Timestamp.Add(time.Millisecond) var emptySig []byte v.Signature = emptySig - err = privVal.SignVote("mychainid", v) + err = privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(t, err, "expected no error on signing same vote") assert.Equal(t, timeStamp, v.Timestamp) diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 5b44973e8..77f3930aa 100644 --- a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -2,7 +2,6 @@ package grpc import ( "context" - "time" grpc "google.golang.org/grpc" "google.golang.org/grpc/status" @@ -55,9 +54,7 @@ func (sc *SignerClient) Close() error { // GetPubKey retrieves a public key from a remote signer // returns an error if client is not able to provide the key -func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) // Todo: should this be configurable? 
- defer cancel() +func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { resp, err := sc.client.GetPubKey(ctx, &privvalproto.PubKeyRequest{ChainId: sc.chainID}) if err != nil { errStatus, _ := status.FromError(err) @@ -74,9 +71,7 @@ func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { } // SignVote requests a remote signer to sign a vote -func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() +func (sc *SignerClient) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { resp, err := sc.client.SignVote(ctx, &privvalproto.SignVoteRequest{ChainId: sc.chainID, Vote: vote}) if err != nil { errStatus, _ := status.FromError(err) @@ -90,9 +85,7 @@ func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { } // SignProposal requests a remote signer to sign a proposal -func (sc *SignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) error { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() +func (sc *SignerClient) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { resp, err := sc.client.SignProposal( ctx, &privvalproto.SignProposalRequest{ChainId: chainID, Proposal: proposal}) diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 09feea731..98730df19 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -60,7 +60,7 @@ func TestSignerClient_GetPubKey(t *testing.T) { client, err := tmgrpc.NewSignerClient(conn, chainID, logger) require.NoError(t, err) - pk, err := client.GetPubKey() + pk, err := client.GetPubKey(context.Background()) require.NoError(t, err) assert.Equal(t, mockPV.PrivKey.PubKey(), pk) } @@ -108,12 +108,12 @@ func TestSignerClient_SignVote(t *testing.T) { pbHave := have.ToProto() - err = client.SignVote(chainID, pbHave) + err = client.SignVote(context.Background(), chainID, pbHave) require.NoError(t, err) pbWant := want.ToProto() - require.NoError(t, mockPV.SignVote(chainID, pbWant)) + require.NoError(t, mockPV.SignVote(context.Background(), chainID, pbWant)) assert.Equal(t, pbWant.Signature, pbHave.Signature) } @@ -157,12 +157,12 @@ func TestSignerClient_SignProposal(t *testing.T) { pbHave := have.ToProto() - err = client.SignProposal(chainID, pbHave) + err = client.SignProposal(context.Background(), chainID, pbHave) require.NoError(t, err) pbWant := want.ToProto() - require.NoError(t, mockPV.SignProposal(chainID, pbWant)) + require.NoError(t, mockPV.SignProposal(context.Background(), chainID, pbWant)) assert.Equal(t, pbWant.Signature, pbHave.Signature) } diff --git a/privval/grpc/server.go b/privval/grpc/server.go index 763e50aaa..f5c434b1b 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -39,7 +39,7 @@ func (ss *SignerServer) GetPubKey(ctx context.Context, req *privvalproto.PubKeyR *privvalproto.PubKeyResponse, error) { var pubKey crypto.PubKey - pubKey, err := ss.privVal.GetPubKey() + pubKey, err := ss.privVal.GetPubKey(ctx) if err != nil { return nil, status.Errorf(codes.NotFound, "error getting pubkey: %v", err) } @@ -60,12 +60,12 @@ func (ss *SignerServer) SignVote(ctx context.Context, req *privvalproto.SignVote *privvalproto.SignedVoteResponse, error) { vote := req.Vote - err := ss.privVal.SignVote(req.ChainId, vote) + err := ss.privVal.SignVote(ctx, req.ChainId, vote) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "error 
signing vote: %v", err) } - ss.logger.Info("SignerServer: SignVote Success") + ss.logger.Info("SignerServer: SignVote Success", "height", req.Vote.Height) return &privvalproto.SignedVoteResponse{Vote: *vote}, nil } @@ -76,12 +76,12 @@ func (ss *SignerServer) SignProposal(ctx context.Context, req *privvalproto.Sign *privvalproto.SignedProposalResponse, error) { proposal := req.Proposal - err := ss.privVal.SignProposal(req.ChainId, proposal) + err := ss.privVal.SignProposal(ctx, req.ChainId, proposal) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "error signing proposal: %v", err) } - ss.logger.Info("SignerServer: SignProposal Success") + ss.logger.Info("SignerServer: SignProposal Success", "height", req.Proposal.Height) return &privvalproto.SignedProposalResponse{Proposal: *proposal}, nil } diff --git a/privval/grpc/server_test.go b/privval/grpc/server_test.go index ab728fdad..9fec9f2fd 100644 --- a/privval/grpc/server_test.go +++ b/privval/grpc/server_test.go @@ -41,7 +41,7 @@ func TestGetPubKey(t *testing.T) { if tc.err { require.Error(t, err) } else { - pk, err := tc.pv.GetPubKey() + pk, err := tc.pv.GetPubKey(context.Background()) require.NoError(t, err) assert.Equal(t, resp.PubKey.GetEd25519(), pk.Bytes()) } @@ -114,7 +114,8 @@ func TestSignVote(t *testing.T) { } else { pbVote := tc.want.ToProto() - require.NoError(t, tc.pv.SignVote(ChainID, pbVote)) + require.NoError(t, tc.pv.SignVote(context.Background(), ChainID, pbVote)) + assert.Equal(t, pbVote.Signature, resp.Vote.Signature) } }) @@ -179,7 +180,7 @@ func TestSignProposal(t *testing.T) { require.Error(t, err) } else { pbProposal := tc.want.ToProto() - require.NoError(t, tc.pv.SignProposal(ChainID, pbProposal)) + require.NoError(t, tc.pv.SignProposal(context.Background(), ChainID, pbProposal)) assert.Equal(t, pbProposal.Signature, resp.Proposal.Signature) } }) diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index 92a7d0655..9bd702196 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "time" @@ -44,13 +45,13 @@ func (sc *RetrySignerClient) Ping() error { return sc.next.Ping() } -func (sc *RetrySignerClient) GetPubKey() (crypto.PubKey, error) { +func (sc *RetrySignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { var ( pk crypto.PubKey err error ) for i := 0; i < sc.retries || sc.retries == 0; i++ { - pk, err = sc.next.GetPubKey() + pk, err = sc.next.GetPubKey(ctx) if err == nil { return pk, nil } @@ -63,10 +64,10 @@ func (sc *RetrySignerClient) GetPubKey() (crypto.PubKey, error) { return nil, fmt.Errorf("exhausted all attempts to get pubkey: %w", err) } -func (sc *RetrySignerClient) SignVote(chainID string, vote *tmproto.Vote) error { +func (sc *RetrySignerClient) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { var err error for i := 0; i < sc.retries || sc.retries == 0; i++ { - err = sc.next.SignVote(chainID, vote) + err = sc.next.SignVote(ctx, chainID, vote) if err == nil { return nil } @@ -79,10 +80,10 @@ func (sc *RetrySignerClient) SignVote(chainID string, vote *tmproto.Vote) error return fmt.Errorf("exhausted all attempts to sign vote: %w", err) } -func (sc *RetrySignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) error { +func (sc *RetrySignerClient) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { var err error for i := 0; i < sc.retries || sc.retries == 0; i++ { - err = 
sc.next.SignProposal(chainID, proposal) + err = sc.next.SignProposal(ctx, chainID, proposal) if err == nil { return nil } diff --git a/privval/signer_client.go b/privval/signer_client.go index aecb0381e..d25584c8f 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "time" @@ -68,7 +69,7 @@ func (sc *SignerClient) Ping() error { // GetPubKey retrieves a public key from a remote signer // returns an error if client is not able to provide the key -func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { +func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID})) if err != nil { return nil, fmt.Errorf("send: %w", err) @@ -91,7 +92,7 @@ func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { } // SignVote requests a remote signer to sign a vote -func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { +func (sc *SignerClient) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.SignVoteRequest{Vote: vote, ChainId: chainID})) if err != nil { return err @@ -111,7 +112,7 @@ func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { } // SignProposal requests a remote signer to sign a proposal -func (sc *SignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) error { +func (sc *SignerClient) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { response, err := sc.endpoint.SendRequest(mustWrapMsg( &privvalproto.SignProposalRequest{Proposal: proposal, ChainId: chainID}, )) diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 019fd2c96..9aa49e709 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "testing" "time" @@ -97,16 +98,16 @@ func TestSignerGetPubKey(t *testing.T) { } }) - pubKey, err := tc.signerClient.GetPubKey() + pubKey, err := tc.signerClient.GetPubKey(context.Background()) require.NoError(t, err) - expectedPubKey, err := tc.mockPV.GetPubKey() + expectedPubKey, err := tc.mockPV.GetPubKey(context.Background()) require.NoError(t, err) assert.Equal(t, expectedPubKey, pubKey) - pubKey, err = tc.signerClient.GetPubKey() + pubKey, err = tc.signerClient.GetPubKey(context.Background()) require.NoError(t, err) - expectedpk, err := tc.mockPV.GetPubKey() + expectedpk, err := tc.mockPV.GetPubKey(context.Background()) require.NoError(t, err) expectedAddr := expectedpk.Address() @@ -147,8 +148,8 @@ func TestSignerProposal(t *testing.T) { } }) - require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignProposal(context.Background(), tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignProposal(context.Background(), tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -191,8 +192,8 @@ func TestSignerVote(t *testing.T) { } }) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, 
have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -237,8 +238,8 @@ func TestSignerVoteResetDeadline(t *testing.T) { time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) // TODO(jleni): Clarify what is actually being tested @@ -246,8 +247,8 @@ func TestSignerVoteResetDeadline(t *testing.T) { // This would exceed the deadline if it was not extended by the previous message time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } } @@ -298,8 +299,8 @@ func TestSignerVoteKeepAlive(t *testing.T) { time.Sleep(testTimeoutReadWrite * 3) tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -335,13 +336,13 @@ func TestSignerSignProposalErrors(t *testing.T) { Signature: []byte("signature"), } - err := tc.signerClient.SignProposal(tc.chainID, proposal.ToProto()) + err := tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignProposal(tc.chainID, proposal.ToProto()) + err = tc.mockPV.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) require.Error(t, err) - err = tc.signerClient.SignProposal(tc.chainID, proposal.ToProto()) + err = tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) require.Error(t, err) } } @@ -378,18 +379,18 @@ func TestSignerSignVoteErrors(t *testing.T) { } }) - err := tc.signerClient.SignVote(tc.chainID, vote.ToProto()) + err := tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignVote(tc.chainID, vote.ToProto()) + err = tc.mockPV.SignVote(context.Background(), tc.chainID, vote.ToProto()) require.Error(t, err) - err = tc.signerClient.SignVote(tc.chainID, vote.ToProto()) + err = tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) require.Error(t, err) } } -func brokenHandler(privVal types.PrivValidator, request privvalproto.Message, +func brokenHandler(ctx context.Context, privVal types.PrivValidator, request privvalproto.Message, chainID string) (privvalproto.Message, error) { var res privvalproto.Message var err error @@ -433,7 +434,7 @@ func TestSignerUnexpectedResponse(t *testing.T) { ts := time.Now() want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} - e := 
tc.signerClient.SignVote(tc.chainID, want.ToProto()) + e := tc.signerClient.SignVote(context.Background(), tc.chainID, want.ToProto()) assert.EqualError(t, e, "empty response") } } diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 682863b19..18ad8a996 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "github.com/tendermint/tendermint/crypto" @@ -12,6 +13,7 @@ import ( ) func DefaultValidationRequestHandler( + ctx context.Context, privVal types.PrivValidator, req privvalproto.Message, chainID string, @@ -31,7 +33,7 @@ func DefaultValidationRequestHandler( } var pubKey crypto.PubKey - pubKey, err = privVal.GetPubKey() + pubKey, err = privVal.GetPubKey(ctx) if err != nil { return res, err } @@ -57,7 +59,7 @@ func DefaultValidationRequestHandler( vote := r.SignVoteRequest.Vote - err = privVal.SignVote(chainID, vote) + err = privVal.SignVote(ctx, chainID, vote) if err != nil { res = mustWrapMsg(&privvalproto.SignedVoteResponse{ Vote: tmproto.Vote{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) @@ -76,7 +78,7 @@ func DefaultValidationRequestHandler( proposal := r.SignProposalRequest.Proposal - err = privVal.SignProposal(chainID, proposal) + err = privVal.SignProposal(ctx, chainID, proposal) if err != nil { res = mustWrapMsg(&privvalproto.SignedProposalResponse{ Proposal: tmproto.Proposal{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) diff --git a/privval/signer_server.go b/privval/signer_server.go index c14524e36..f6e150ca0 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -1,6 +1,7 @@ package privval import ( + "context" "io" "github.com/tendermint/tendermint/libs/service" @@ -11,6 +12,7 @@ import ( // ValidationRequestHandlerFunc handles different remoteSigner requests type ValidationRequestHandlerFunc func( + ctx context.Context, privVal types.PrivValidator, requestMessage privvalproto.Message, chainID string) (privvalproto.Message, error) @@ -76,7 +78,7 @@ func (ss *SignerServer) servicePendingRequest() { // limit the scope of the lock ss.handlerMtx.Lock() defer ss.handlerMtx.Unlock() - res, err = ss.validationRequestHandler(ss.privVal, req, ss.chainID) + res, err = ss.validationRequestHandler(context.TODO(), ss.privVal, req, ss.chainID) // TODO: pass a request-scoped context instead of context.TODO() if err != nil { // only log the error; we'll reply with an error in res ss.Logger.Error("SignerServer: handleMessage", "err", err) diff --git a/rpc/core/status.go b/rpc/core/status.go index d8e45355d..0ea29ee3d 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -1,6 +1,7 @@ package core import ( + "bytes" "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -49,7 +50,14 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { if val := validatorAtHeight(latestUncommittedHeight()); val != nil { votingPower = val.VotingPower } - + validatorInfo := ctypes.ValidatorInfo{} + if env.PubKey != nil { + validatorInfo = ctypes.ValidatorInfo{ + Address: env.PubKey.Address(), + PubKey: env.PubKey, + VotingPower: votingPower, + } + } result := &ctypes.ResultStatus{ NodeInfo: env.P2PTransport.NodeInfo(), SyncInfo: ctypes.SyncInfo{ @@ -63,22 +71,32 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), CatchingUp: env.ConsensusReactor.WaitSync(), }, - ValidatorInfo: ctypes.ValidatorInfo{ - Address: env.PubKey.Address(), - PubKey: env.PubKey, - 
VotingPower: votingPower, - }, + ValidatorInfo: validatorInfo, } return result, nil } func validatorAtHeight(h uint64) *types.Validator { - vals, err := env.StateStore.LoadValidators(h) + valsWithH, err := env.StateStore.LoadValidators(h) if err != nil { return nil } + if env.PubKey == nil { + return nil + } privValAddress := env.PubKey.Address() - _, val := vals.GetByAddress(privValAddress) + + // If we're still at height h, search in the current validator set. + lastBlockHeight, vals := env.ConsensusState.GetValidators() + if lastBlockHeight == h { + for _, val := range vals { + if bytes.Equal(val.Address, privValAddress) { + return val + } + } + } + + _, val := valsWithH.GetByAddress(privValAddress) return val } diff --git a/state/state_test.go b/state/state_test.go index 03401186f..c00dae159 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -2,6 +2,7 @@ package state_test import ( "bytes" + "context" "fmt" "math" "math/big" @@ -363,7 +364,7 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(tmrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = tmrand.Int64() diff --git a/state/validation_test.go b/state/validation_test.go index dcebcd765..64b97632d 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -1,6 +1,7 @@ package state_test import ( + "context" "testing" "time" @@ -199,7 +200,7 @@ func TestValidateBlockCommit(t *testing.T) { ) require.NoError(t, err, "height %d", height) - bpvPubKey, err := badPrivVal.GetPubKey() + bpvPubKey, err := badPrivVal.GetPubKey(context.Background()) require.NoError(t, err) badVote := &types.Vote{ @@ -215,9 +216,9 @@ func TestValidateBlockCommit(t *testing.T) { g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote(chainID, g) + err = badPrivVal.SignVote(context.Background(), chainID, g) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(chainID, b) + err = badPrivVal.SignVote(context.Background(), chainID, b) require.NoError(t, err, "height %d", height) goodVote.Signature, badVote.Signature = g.Signature, b.Signature diff --git a/statesync/snapshots.go b/statesync/snapshots.go index 53ffed4e7..1dd3a6d34 100644 --- a/statesync/snapshots.go +++ b/statesync/snapshots.go @@ -33,9 +33,9 @@ type snapshot struct { func (s *snapshot) Key() snapshotKey { // Hash.Write() never returns an error. hasher := sha256.New() - hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks))) //nolint:errcheck // ignore error - hasher.Write(s.Hash) //nolint:errcheck // ignore error - hasher.Write(s.Metadata) //nolint:errcheck // ignore error + hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks))) + hasher.Write(s.Hash) + hasher.Write(s.Metadata) var key snapshotKey copy(key[:], hasher.Sum(nil)) return key diff --git a/test/app/test.sh b/test/app/test.sh index 710aae80b..d415bc10e 100755 --- a/test/app/test.sh +++ b/test/app/test.sh @@ -17,7 +17,7 @@ function kvstore_over_socket(){ echo "Starting kvstore_over_socket" abci-cli kvstore > /dev/null & pid_kvstore=$! - tendermint start > tendermint.log & + tendermint start --mode validator > tendermint.log & pid_tendermint=$! sleep 5 @@ -32,7 +32,7 @@ function kvstore_over_socket_reorder(){ rm -rf $TMHOME tendermint init echo "Starting kvstore_over_socket_reorder (i.e. 
start tendermint first)" - tendermint start > tendermint.log & + tendermint start --mode validator > tendermint.log & pid_tendermint=$! sleep 2 abci-cli kvstore > /dev/null & @@ -52,7 +52,7 @@ function counter_over_socket() { echo "Starting counter_over_socket" abci-cli counter --serial > /dev/null & pid_counter=$! - tendermint start > tendermint.log & + tendermint start --mode validator > tendermint.log & pid_tendermint=$! sleep 5 @@ -68,7 +68,7 @@ function counter_over_grpc() { echo "Starting counter_over_grpc" abci-cli counter --serial --abci grpc > /dev/null & pid_counter=$! - tendermint start --abci grpc > tendermint.log & + tendermint start --mode validator --abci grpc > tendermint.log & pid_tendermint=$! sleep 5 @@ -86,7 +86,7 @@ function counter_over_grpc_grpc() { pid_counter=$! sleep 1 GRPC_PORT=36656 - tendermint start --abci grpc --rpc.grpc-laddr tcp://localhost:$GRPC_PORT > tendermint.log & + tendermint start --mode validator --abci grpc --rpc.grpc-laddr tcp://localhost:$GRPC_PORT > tendermint.log & pid_tendermint=$! sleep 5 diff --git a/test/e2e/app/main.go b/test/e2e/app/main.go index a465bb786..5141750ed 100644 --- a/test/e2e/app/main.go +++ b/test/e2e/app/main.go @@ -75,9 +75,12 @@ func run(configFile string) error { case "socket", "grpc": err = startApp(cfg) case "builtin": - if cfg.Mode == string(e2e.ModeLight) { - err = startLightClient(cfg) - } else { + switch cfg.Mode { + case string(e2e.ModeLight): + err = startLightNode(cfg) + case string(e2e.ModeSeed): + err = startSeedNode(cfg) + default: err = startNode(cfg) } // FIXME: Temporarily remove maverick until it is redesigned @@ -149,7 +152,25 @@ func startNode(cfg *Config) error { return n.Start() } -func startLightClient(cfg *Config) error { +func startSeedNode(cfg *Config) error { + tmcfg, nodeLogger, nodeKey, err := setupNode() + if err != nil { + return fmt.Errorf("failed to set up config: %w", err) + } + + n, err := node.NewSeedNode( + tmcfg, + *nodeKey, + node.DefaultGenesisDocProviderFunc(tmcfg), + nodeLogger, + ) + if err != nil { + return err + } + return n.Start() +} + +func startLightNode(cfg *Config) error { tmcfg, nodeLogger, _, err := setupNode() if err != nil { return err diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index 3f1602277..68eab6f97 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -67,6 +67,9 @@ func waitForHeight(testnet *e2e.Testnet, height uint64) (*types.Block, *types.Bl // waitForNode waits for a node to become available and catch up to the given block height. 
func waitForNode(node *e2e.Node, height uint64, timeout time.Duration) (*rpctypes.ResultStatus, error) { + // Seed nodes don't expose RPC, so there is nothing to wait for. + if node.Mode == e2e.ModeSeed { + return nil, nil + } client, err := node.Client() if err != nil { return nil, err } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 49e2ea251..7653ae764 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -256,6 +256,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.P2P.AddrBookStrict = false cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second + if node.Mode != e2e.ModeLight { + cfg.Mode = string(node.Mode) + } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -296,7 +299,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { return nil, fmt.Errorf("invalid privval protocol setting %q", node.PrivvalProtocol) } case e2e.ModeSeed: - cfg.P2P.SeedMode = true cfg.P2P.PexReactor = true case e2e.ModeFull, e2e.ModeLight: // Don't need to do anything, since we're using a dummy privval key by default. diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index b0940f3db..4d333949a 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -2,6 +2,7 @@ package internal import ( "bytes" + "context" "fmt" "net" "os" @@ -195,12 +196,12 @@ func (th *TestHarness) Run() { // local Tendermint version. func (th *TestHarness) TestPublicKey() error { th.logger.Info("TEST: Public key of remote signer") - fpvk, err := th.fpv.GetPubKey() + fpvk, err := th.fpv.GetPubKey(context.Background()) if err != nil { return err } th.logger.Info("Local", "pubKey", fpvk) - sck, err := th.signerClient.GetPubKey() + sck, err := th.signerClient.GetPubKey(context.Background()) if err != nil { return err } @@ -234,7 +235,7 @@ func (th *TestHarness) TestSignProposal() error { } p := prop.ToProto() propBytes := types.ProposalSignBytes(th.chainID, p) - if err := th.signerClient.SignProposal(th.chainID, p); err != nil { + if err := th.signerClient.SignProposal(context.Background(), th.chainID, p); err != nil { th.logger.Error("FAILED: Signing of proposal", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } @@ -245,7 +246,7 @@ func (th *TestHarness) TestSignProposal() error { th.logger.Error("FAILED: Signed proposal is invalid", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } - sck, err := th.signerClient.GetPubKey() + sck, err := th.signerClient.GetPubKey(context.Background()) if err != nil { return err } @@ -284,7 +285,7 @@ func (th *TestHarness) TestSignVote() error { v := vote.ToProto() voteBytes := types.VoteSignBytes(th.chainID, v) // sign the vote - if err := th.signerClient.SignVote(th.chainID, v); err != nil { + if err := th.signerClient.SignVote(context.Background(), th.chainID, v); err != nil { th.logger.Error("FAILED: Signing of vote", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } @@ -295,7 +296,7 @@ func (th *TestHarness) TestSignVote() error { th.logger.Error("FAILED: Signed vote is invalid", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } - sck, err := th.signerClient.GetPubKey() + sck, err := th.signerClient.GetPubKey(context.Background()) if err != nil { return err } diff --git a/types/block_test.go b/types/block_test.go index 21b78a415..240db8cde 100644 --- a/types/block_test.go +++ 
b/types/block_test.go @@ -3,6 +3,7 @@ package types import ( // it is ok to use math/rand here: we do not need a cryptographically secure random // number generator here and we can run the tests a bit faster + "context" "crypto/rand" "encoding/hex" "math" @@ -565,7 +566,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { vi := int32(0) for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { - pubKey, err := vals[vi].GetPubKey() + pubKey, err := vals[vi].GetPubKey(context.Background()) require.NoError(t, err) vote := &Vote{ ValidatorAddress: pubKey.Address(), diff --git a/types/evidence.go b/types/evidence.go index bc5cdca63..08448a07b 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "context" "encoding/binary" "errors" "fmt" @@ -547,15 +548,15 @@ func NewMockDuplicateVoteEvidence(height uint64, time time.Time, chainID string) // assumes voting power to be 10 and validator to be the only one in the set func NewMockDuplicateVoteEvidenceWithValidator(height uint64, time time.Time, pv PrivValidator, chainID string) *DuplicateVoteEvidence { - pubKey, _ := pv.GetPubKey() + pubKey, _ := pv.GetPubKey(context.Background()) val := NewValidator(pubKey, 10) voteA := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vA := voteA.ToProto() - _ = pv.SignVote(chainID, vA) + _ = pv.SignVote(context.Background(), chainID, vA) voteA.Signature = vA.Signature voteB := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vB := voteB.ToProto() - _ = pv.SignVote(chainID, vB) + _ = pv.SignVote(context.Background(), chainID, vB) voteB.Signature = vB.Signature return NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) } diff --git a/types/evidence_test.go b/types/evidence_test.go index ed7d2edd5..3b33065dc 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "math" "testing" "time" @@ -219,7 +220,7 @@ func TestMockEvidenceValidateBasic(t *testing.T) { func makeVote( t *testing.T, val PrivValidator, chainID string, valIndex int32, height uint64, round int32, step int, blockID BlockID, time time.Time) *Vote { - pubKey, err := val.GetPubKey() + pubKey, err := val.GetPubKey(context.Background()) require.NoError(t, err) v := &Vote{ ValidatorAddress: pubKey.Address(), @@ -232,7 +233,7 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(chainID, vpb) + err = val.SignVote(context.Background(), chainID, vpb) if err != nil { panic(err) } diff --git a/types/priv_validator.go b/types/priv_validator.go index 3ce02511a..f82da8991 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "context" "errors" "fmt" @@ -13,10 +14,10 @@ import ( // PrivValidator defines the functionality of a local Tendermint validator // that signs votes and proposals, and never double signs. 
type PrivValidator interface { - GetPubKey() (crypto.PubKey, error) + GetPubKey(context.Context) (crypto.PubKey, error) - SignVote(chainID string, vote *tmproto.Vote) error - SignProposal(chainID string, proposal *tmproto.Proposal) error + SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error + SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error } type PrivValidatorsByAddress []PrivValidator @@ -26,11 +27,11 @@ func (pvs PrivValidatorsByAddress) Len() int { } func (pvs PrivValidatorsByAddress) Less(i, j int) bool { - pvi, err := pvs[i].GetPubKey() + pvi, err := pvs[i].GetPubKey(context.Background()) if err != nil { panic(err) } - pvj, err := pvs[j].GetPubKey() + pvj, err := pvs[j].GetPubKey(context.Background()) if err != nil { panic(err) } @@ -65,12 +66,12 @@ func NewMockPVWithParams(privKey crypto.PrivKey, breakProposalSigning, breakVote } // Implements PrivValidator. -func (pv MockPV) GetPubKey() (crypto.PubKey, error) { +func (pv MockPV) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return pv.PrivKey.PubKey(), nil } // Implements PrivValidator. -func (pv MockPV) SignVote(chainID string, vote *tmproto.Vote) error { +func (pv MockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { useChainID := chainID if pv.breakVoteSigning { useChainID = "incorrect-chain-id" @@ -86,7 +87,7 @@ func (pv MockPV) SignVote(chainID string, vote *tmproto.Vote) error { } // Implements PrivValidator. -func (pv MockPV) SignProposal(chainID string, proposal *tmproto.Proposal) error { +func (pv MockPV) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { useChainID := chainID if pv.breakProposalSigning { useChainID = "incorrect-chain-id" @@ -102,7 +103,7 @@ func (pv MockPV) SignProposal(chainID string, proposal *tmproto.Proposal) error } func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { - pubKey, _ := pv.GetPubKey() + pubKey, _ := pv.GetPubKey(context.Background()) return &Validator{ Address: pubKey.Address(), PubKey: pubKey, @@ -112,7 +113,7 @@ func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { // String returns a string representation of the MockPV. func (pv MockPV) String() string { - mpv, _ := pv.GetPubKey() // mockPV will never return an error, ignored here + mpv, _ := pv.GetPubKey(context.Background()) // mockPV will never return an error, ignored here return fmt.Sprintf("MockPV{%v}", mpv.Address()) } @@ -129,17 +130,17 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") // Implements PrivValidator. -func (pv *ErroringMockPV) GetPubKey() (crypto.PubKey, error) { +func (pv *ErroringMockPV) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return nil, ErroringMockPVErr } // Implements PrivValidator. -func (pv *ErroringMockPV) SignVote(chainID string, vote *tmproto.Vote) error { +func (pv *ErroringMockPV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { return ErroringMockPVErr } // Implements PrivValidator. 
-func (pv *ErroringMockPV) SignProposal(chainID string, proposal *tmproto.Proposal) error { +func (pv *ErroringMockPV) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { return ErroringMockPVErr } diff --git a/types/proposal_test.go b/types/proposal_test.go index 06b278afd..889060cc4 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "math" "testing" "time" @@ -56,7 +57,7 @@ func TestProposalString(t *testing.T) { func TestProposalVerifySignature(t *testing.T) { privVal := NewMockPV() - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) prop := NewProposal( @@ -66,7 +67,7 @@ func TestProposalVerifySignature(t *testing.T) { signBytes := ProposalSignBytes("test_chain_id", p) // sign it - err = privVal.SignProposal("test_chain_id", p) + err = privVal.SignProposal(context.Background(), "test_chain_id", p) require.NoError(t, err) prop.Signature = p.Signature @@ -103,7 +104,7 @@ func BenchmarkProposalWriteSignBytes(b *testing.B) { func BenchmarkProposalSign(b *testing.B) { privVal := NewMockPV() for i := 0; i < b.N; i++ { - err := privVal.SignProposal("test_chain_id", pbp) + err := privVal.SignProposal(context.Background(), "test_chain_id", pbp) if err != nil { b.Error(err) } @@ -112,9 +113,9 @@ func BenchmarkProposalSign(b *testing.B) { func BenchmarkProposalVerifySignature(b *testing.B) { privVal := NewMockPV() - err := privVal.SignProposal("test_chain_id", pbp) + err := privVal.SignProposal(context.Background(), "test_chain_id", pbp) require.NoError(b, err) - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(b, err) for i := 0; i < b.N; i++ { @@ -153,7 +154,7 @@ func TestProposalValidateBasic(t *testing.T) { 4, 2, 2, blockID) p := prop.ToProto() - err := privVal.SignProposal("test_chain_id", p) + err := privVal.SignProposal(context.Background(), "test_chain_id", p) prop.Signature = p.Signature require.NoError(t, err) tc.malleateProposal(prop) diff --git a/types/test_util.go b/types/test_util.go index a32cd707b..eefcbd000 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -1,6 +1,7 @@ package types import ( + "context" "fmt" "time" @@ -12,7 +13,7 @@ func MakeCommit(blockID BlockID, height uint64, round int32, // all sign for i := 0; i < len(validators); i++ { - pubKey, err := validators[i].GetPubKey() + pubKey, err := validators[i].GetPubKey(context.Background()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -37,7 +38,7 @@ func MakeCommit(blockID BlockID, height uint64, round int32, func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) { v := vote.ToProto() - err = privVal.SignVote(voteSet.ChainID(), v) + err = privVal.SignVote(context.Background(), voteSet.ChainID(), v) if err != nil { return false, err } @@ -53,7 +54,7 @@ func MakeVote( chainID string, now time.Time, ) (*Vote, error) { - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -70,7 +71,7 @@ func MakeVote( } v := vote.ToProto() - if err := privVal.SignVote(chainID, v); err != nil { + if err := privVal.SignVote(context.Background(), chainID, v); err != nil { return nil, err } diff --git a/types/validator.go b/types/validator.go index 961b833e4..0f4f58146 100644 --- a/types/validator.go +++ b/types/validator.go @@ -2,6 +2,7 @@ 
package types import ( "bytes" + "context" "errors" "fmt" "strings" @@ -184,7 +185,7 @@ func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { if randPower { votePower += int64(tmrand.Uint32()) } - pubKey, err := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey(context.Background()) if err != nil { panic(fmt.Errorf("could not retrieve pubkey %w", err)) } diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 44314296a..09ff06a65 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "context" "fmt" "math" "sort" @@ -755,7 +756,7 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { // malleate 4th signature vote := voteSet.GetByIndex(3) v := vote.ToProto() - err = vals[3].SignVote("CentaurusA", v) + err = vals[3].SignVote(context.Background(), "CentaurusA", v) require.NoError(t, err) vote.Signature = v.Signature commit.Signatures[3] = vote.CommitSig() @@ -780,7 +781,7 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign // malleate 4th signature (3 signatures are enough for 2/3+) vote := voteSet.GetByIndex(3) v := vote.ToProto() - err = vals[3].SignVote("CentaurusA", v) + err = vals[3].SignVote(context.Background(), "CentaurusA", v) require.NoError(t, err) vote.Signature = v.Signature commit.Signatures[3] = vote.CommitSig() @@ -803,7 +804,7 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) vote := voteSet.GetByIndex(2) v := vote.ToProto() - err = vals[2].SignVote("CentaurusA", v) + err = vals[2].SignVote(context.Background(), "CentaurusA", v) require.NoError(t, err) vote.Signature = v.Signature commit.Signatures[2] = vote.CommitSig() diff --git a/types/validator_test.go b/types/validator_test.go index 5eb2ed7bf..8f9ff9e04 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -40,7 +41,7 @@ func TestValidatorProtoBuf(t *testing.T) { func TestValidatorValidateBasic(t *testing.T) { priv := NewMockPV() - pubKey, _ := priv.GetPubKey() + pubKey, _ := priv.GetPubKey(context.Background()) testCases := []struct { val *Validator err bool diff --git a/types/vote_set_test.go b/types/vote_set_test.go index f8c980246..3d892886a 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "context" "testing" "github.com/stretchr/testify/assert" @@ -18,7 +19,7 @@ func TestVoteSet_AddVote_Good(t *testing.T) { voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) val0 := privValidators[0] - val0p, err := val0.GetPubKey() + val0p, err := val0.GetPubKey(context.Background()) require.NoError(t, err) val0Addr := val0p.Address() @@ -61,7 +62,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes for nil. { - pubKey, err := privValidators[0].GetPubKey() + pubKey, err := privValidators[0].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) @@ -73,7 +74,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes again for some block. 
{ - pubKey, err := privValidators[0].GetPubKey() + pubKey, err := privValidators[0].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) @@ -85,7 +86,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val1 votes on another height { - pubKey, err := privValidators[1].GetPubKey() + pubKey, err := privValidators[1].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 1) @@ -97,7 +98,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val2 votes on another round { - pubKey, err := privValidators[2].GetPubKey() + pubKey, err := privValidators[2].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 2) @@ -109,7 +110,7 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val3 votes of another type. { - pubKey, err := privValidators[3].GetPubKey() + pubKey, err := privValidators[3].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 3) @@ -135,7 +136,7 @@ func TestVoteSet_2_3Majority(t *testing.T) { } // 6 out of 10 voted for nil. for i := int32(0); i < 6; i++ { - pubKey, err := privValidators[i].GetPubKey() + pubKey, err := privValidators[i].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) @@ -147,7 +148,7 @@ func TestVoteSet_2_3Majority(t *testing.T) { // 7th validator voted for some blockhash { - pubKey, err := privValidators[6].GetPubKey() + pubKey, err := privValidators[6].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 6) @@ -159,7 +160,7 @@ func TestVoteSet_2_3Majority(t *testing.T) { // 8th validator voted for nil. { - pubKey, err := privValidators[7].GetPubKey() + pubKey, err := privValidators[7].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 7) @@ -190,7 +191,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for nil. 
for i := int32(0); i < 66; i++ { - pubKey, err := privValidators[i].GetPubKey() + pubKey, err := privValidators[i].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) @@ -203,7 +204,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 67th validator voted for nil { - pubKey, err := privValidators[66].GetPubKey() + pubKey, err := privValidators[66].GetPubKey(context.Background()) require.NoError(t, err) adrr := pubKey.Address() vote := withValidator(voteProto, adrr, 66) @@ -216,7 +217,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 68th validator voted for a different BlockParts PartSetHeader { - pubKey, err := privValidators[67].GetPubKey() + pubKey, err := privValidators[67].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 67) @@ -230,7 +231,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 69th validator voted for different BlockParts Total { - pubKey, err := privValidators[68].GetPubKey() + pubKey, err := privValidators[68].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 68) @@ -244,7 +245,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 70th validator voted for different BlockHash { - pubKey, err := privValidators[69].GetPubKey() + pubKey, err := privValidators[69].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 69) @@ -257,7 +258,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 71st validator voted for the right BlockHash & BlockPartSetHeader { - pubKey, err := privValidators[70].GetPubKey() + pubKey, err := privValidators[70].GetPubKey(context.Background()) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 70) @@ -285,7 +286,7 @@ func TestVoteSet_Conflicts(t *testing.T) { BlockID: BlockID{nil, PartSetHeader{}}, } - val0, err := privValidators[0].GetPubKey() + val0, err := privValidators[0].GetPubKey(context.Background()) require.NoError(t, err) val0Addr := val0.Address() @@ -332,7 +333,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val1 votes for blockHash1. { - pv, err := privValidators[1].GetPubKey() + pv, err := privValidators[1].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 1) @@ -352,7 +353,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash2. { - pv, err := privValidators[2].GetPubKey() + pv, err := privValidators[2].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) @@ -376,7 +377,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash1. { - pv, err := privValidators[2].GetPubKey() + pv, err := privValidators[2].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) @@ -415,7 +416,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := int32(0); i < 6; i++ { - pv, err := privValidators[i].GetPubKey() + pv, err := privValidators[i].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, i) @@ -430,7 +431,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 7th voted for some other block. 
{ - pv, err := privValidators[6].GetPubKey() + pv, err := privValidators[6].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 6) @@ -443,7 +444,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // The 8th voted like everyone else. { - pv, err := privValidators[7].GetPubKey() + pv, err := privValidators[7].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 7) @@ -453,7 +454,7 @@ func TestVoteSet_MakeCommit(t *testing.T) { // The 9th voted for nil. { - pv, err := privValidators[8].GetPubKey() + pv, err := privValidators[8].GetPubKey(context.Background()) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 8) diff --git a/types/vote_test.go b/types/vote_test.go index ce766a28e..49645c6d6 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "testing" "time" @@ -148,7 +149,7 @@ func TestVoteProposalNotEq(t *testing.T) { func TestVoteVerifySignature(t *testing.T) { privVal := NewMockPV() - pubkey, err := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) vote := examplePrecommit() @@ -156,7 +157,7 @@ func TestVoteVerifySignature(t *testing.T) { signBytes := VoteSignBytes("test_chain_id", v) // sign it - err = privVal.SignVote("test_chain_id", v) + err = privVal.SignVote(context.Background(), "test_chain_id", v) require.NoError(t, err) // verify the same vote @@ -200,7 +201,7 @@ func TestIsVoteTypeValid(t *testing.T) { func TestVoteVerify(t *testing.T) { privVal := NewMockPV() - pubkey, err := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) vote := examplePrevote() @@ -254,7 +255,7 @@ func TestVoteValidateBasic(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { vote := examplePrecommit() v := vote.ToProto() - err := privVal.SignVote("test_chain_id", v) + err := privVal.SignVote(context.Background(), "test_chain_id", v) vote.Signature = v.Signature require.NoError(t, err) tc.malleateVote(vote) @@ -267,7 +268,7 @@ func TestVoteProtobuf(t *testing.T) { privVal := NewMockPV() vote := examplePrecommit() v := vote.ToProto() - err := privVal.SignVote("test_chain_id", v) + err := privVal.SignVote(context.Background(), "test_chain_id", v) vote.Signature = v.Signature require.NoError(t, err)
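
To make the new calling convention concrete: every PrivValidator implementation and caller in this diff now threads a context.Context through GetPubKey, SignVote, and SignProposal, so deadlines belong to the caller instead of being hard-coded inside the signer clients (the gRPC client previously buried a 2-second timeout in each call). A minimal sketch of the caller side, using the MockPV test double from types; the chain ID and the 2-second deadline are illustrative values, not part of this change:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    	"github.com/tendermint/tendermint/types"
    )

    func main() {
    	// MockPV stands in for any PrivValidator (FilePV, signer clients, ...).
    	pv := types.NewMockPV()

    	// The caller now owns the deadline; 2 seconds mirrors the timeout
    	// the gRPC signer client used to hard-code internally.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	pubKey, err := pv.GetPubKey(ctx)
    	if err != nil {
    		panic(fmt.Errorf("can't get pubkey: %w", err))
    	}

    	// "example-chain" is an illustrative chain ID.
    	vote := &tmproto.Vote{
    		Type:             tmproto.PrecommitType,
    		Height:           1,
    		ValidatorAddress: pubKey.Address(),
    	}
    	if err := pv.SignVote(ctx, "example-chain", vote); err != nil {
    		panic(fmt.Errorf("error signing vote: %w", err))
    	}
    	fmt.Printf("signed vote: %X\n", vote.Signature)
    }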
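In the same spirit, the new PeerManager.Register hook lets a component supply its own PeerUpdates instance instead of the 1-element channel Subscribe allocates. A hypothetical wiring sketch under the contract stated in the Register doc comment; the Updates accessor, the PeerUpdate fields (NodeID, Status), the 16-element buffer, and the quit channel are assumptions of this example, not part of the diff:

    package p2pexample

    import (
    	"fmt"

    	"github.com/tendermint/tendermint/p2p"
    )

    // consumePeerUpdates registers a caller-owned subscription with the
    // PeerManager, drains it until quit is closed, then releases it.
    func consumePeerUpdates(peerManager *p2p.PeerManager, quit <-chan struct{}) {
    	// A larger buffer than Subscribe's 1-element channel; purely illustrative.
    	peerUpdates := p2p.NewPeerUpdates(make(chan p2p.PeerUpdate, 16))
    	peerManager.Register(peerUpdates)

    	go func() {
    		// Per the Register contract: consume updates promptly and close
    		// the subscription when done, or the PeerManager will halt.
    		defer peerUpdates.Close()
    		for {
    			select {
    			case update := <-peerUpdates.Updates():
    				fmt.Println("peer update:", update.NodeID, update.Status)
    			case <-quit:
    				return
    			}
    		}
    	}()
    }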