From fbd754b4ded5612b5031d09c275c276221cee398 Mon Sep 17 00:00:00 2001 From: samricotta <37125168+samricotta@users.noreply.github.com> Date: Thu, 11 Aug 2022 10:41:41 +0200 Subject: [PATCH] Backport of sam/abci-responses (#9090) (#9159) *backport of sam/abci-responses Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com> --- .golangci.yml | 2 - CHANGELOG_PENDING.md | 2 + abci/server/server.go | 5 +- behaviour/doc.go | 31 ++- blockchain/v0/pool.go | 1 + blockchain/v0/reactor_test.go | 8 +- blockchain/v1/reactor.go | 4 +- blockchain/v1/reactor_test.go | 8 +- blockchain/v2/reactor_test.go | 12 +- cmd/tendermint/commands/reindex_event.go | 3 + cmd/tendermint/commands/rollback.go | 4 +- config/config.go | 18 +- consensus/byzantine_test.go | 8 +- consensus/common_test.go | 12 +- consensus/mempool_test.go | 4 +- consensus/reactor_test.go | 4 +- consensus/replay.go | 2 +- consensus/replay_file.go | 4 +- consensus/replay_test.go | 26 +- consensus/state.go | 8 +- consensus/wal_generator.go | 4 +- crypto/merkle/doc.go | 25 +- crypto/merkle/tree.go | 8 +- crypto/secp256k1/secp256k1.go | 6 +- evidence/doc.go | 7 +- evidence/mocks/block_store.go | 4 +- evidence/pool.go | 10 +- evidence/pool_test.go | 4 +- evidence/verify.go | 19 +- go.sum | 1 + libs/cli/flags/log_level.go | 3 +- libs/clist/clist.go | 2 - libs/clist/clist_test.go | 2 + libs/flowrate/flowrate.go | 8 +- libs/json/doc.go | 15 +- libs/log/filter.go | 21 +- libs/log/logger.go | 6 +- libs/log/tmfmt_logger_test.go | 2 +- libs/pubsub/pubsub.go | 37 ++- libs/pubsub/query/query.go | 2 +- libs/pubsub/subscription.go | 3 +- light/client.go | 31 +-- light/detector.go | 20 +- light/doc.go | 46 ++-- light/rpc/client.go | 1 + light/rpc/mocks/light_client.go | 4 +- light/verifier.go | 36 +-- mempool/v0/clist_mempool.go | 10 +- node/doc.go | 45 ++-- node/node.go | 27 +- node/node_test.go | 12 +- p2p/conn/conn_notgo110.go | 8 +- p2p/conn/connection.go | 1 + p2p/mocks/peer.go | 4 +- p2p/pex/known_address.go | 17 +- p2p/pex/pex_reactor_test.go | 8 +- p2p/switch.go | 4 +- p2p/transport_test.go | 24 +- p2p/trust/metric_test.go | 1 + privval/doc.go | 10 +- proto/tendermint/abci/types.proto | 10 +- proto/tendermint/consensus/types.pb.go | 2 +- proto/tendermint/consensus/types.proto | 2 +- proto/tendermint/state/types.pb.go | 329 +++++++++++++++++++---- proto/tendermint/state/types.proto | 5 + proto/tendermint/types/evidence.proto | 18 +- proto/tendermint/types/types.proto | 8 +- proxy/mocks/app_conn_consensus.go | 4 +- proxy/mocks/app_conn_mempool.go | 4 +- proxy/mocks/app_conn_query.go | 4 +- proxy/mocks/app_conn_snapshot.go | 4 +- proxy/mocks/client_creator.go | 4 +- rpc/client/http/http.go | 30 +-- rpc/client/mock/client.go | 1 - rpc/core/blocks_test.go | 4 +- rpc/core/env.go | 2 +- rpc/jsonrpc/doc.go | 65 +++-- rpc/jsonrpc/server/http_json_handler.go | 5 +- rpc/jsonrpc/server/http_server.go | 18 +- rpc/jsonrpc/types/types.go | 23 +- state/errors.go | 7 +- state/execution_test.go | 20 +- state/export_test.go | 2 +- state/helpers_test.go | 4 +- state/indexer/mocks/block_indexer.go | 4 +- state/mocks/block_store.go | 4 +- state/mocks/evidence_pool.go | 4 +- state/mocks/store.go | 27 +- state/rollback_test.go | 7 +- state/state.go | 2 +- state/state_test.go | 36 ++- state/store.go | 98 +++++-- state/store_test.go | 81 +++++- state/tx_filter_test.go | 4 +- state/txindex/mocks/tx_indexer.go | 4 +- state/validation_test.go | 12 +- statesync/mocks/state_provider.go | 4 +- store/store.go | 13 +- store/store_test.go | 12 +- 
test/e2e/app/state.go | 2 +- test/e2e/generator/main.go | 2 +- test/e2e/node/config.go | 2 +- test/e2e/pkg/testnet.go | 2 +- test/e2e/runner/exec.go | 2 +- test/maverick/consensus/replay_file.go | 4 +- test/maverick/consensus/state.go | 8 +- test/maverick/consensus/wal_generator.go | 4 +- test/maverick/node/node.go | 28 +- types/block_test.go | 2 +- types/event_bus.go | 2 +- types/utils.go | 6 +- types/validator_set.go | 31 ++- types/validator_set_test.go | 5 +- types/vote_set.go | 62 ++--- 114 files changed, 1132 insertions(+), 581 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 9d846393d..c98f3c01f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -60,8 +60,6 @@ linters-settings: # check-shadowing: true revive: min-confidence: 0 - maligned: - suggest-new: true misspell: locale: US ignore-words: diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0d381e7d0..ef7e03c60 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -25,6 +25,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### IMPROVEMENTS +- [config] \#9054 Flag added to overwrite abciresponses. + ### BUG FIXES - [#9103] fix unsafe-reset-all for working with home path (@rootwarp) diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad02..4b70545b2 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -2,9 +2,8 @@ Package server is used to start a new ABCI server. It contains two server implementation: - * gRPC server - * socket server - + - gRPC server + - socket server */ package server diff --git a/behaviour/doc.go b/behaviour/doc.go index 40061e095..7b00ae1eb 100644 --- a/behaviour/doc.go +++ b/behaviour/doc.go @@ -8,35 +8,34 @@ There are four different behaviours a reactor can report. 1. bad message -type badMessage struct { - explanation string -} + type badMessage struct { + explanation string + } -This message will request the peer be stopped for an error +# This message will request the peer be stopped for an error 2. message out of order -type messageOutOfOrder struct { - explanation string -} + type messageOutOfOrder struct { + explanation string + } -This message will request the peer be stopped for an error +# This message will request the peer be stopped for an error 3. consesnsus Vote -type consensusVote struct { - explanation string -} + type consensusVote struct { + explanation string + } -This message will request the peer be marked as good +# This message will request the peer be marked as good 4. 
block part -type blockPart struct { - explanation string -} + type blockPart struct { + explanation string + } This message will request the peer be marked as good - */ package behaviour diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index 69e0b55c4..1328bfa27 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -410,6 +410,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) { } // for debugging purposes +// //nolint:unused func (pool *BlockPool) debug() string { pool.mtx.Lock() diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index a88b499f4..dd23d015f 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -70,7 +70,9 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := store.NewBlockStore(blockDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) @@ -83,7 +85,9 @@ func newBlockchainReactor( // pool.height is determined from the store. fastSync := true db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index c4c61ec51..ea815afa3 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -534,8 +534,8 @@ func (bcR *BlockchainReactor) switchToConsensus() { // Called by FSM and pool: // - pool calls when it detects slow peer or when peer times out // - FSM calls when: -// - adding a block (addBlock) fails -// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks +// - adding a block (addBlock) fails +// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) { bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err) msgData := bcFsmMessage{ diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index c0f371905..32b170913 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -102,7 +102,9 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := store.NewBlockStore(blockDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) @@ -115,7 +117,9 @@ func newBlockchainReactor( // pool.height is determined from the store. 
fastSync := true db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index c2792d58b..9ee05aeaa 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -159,7 +159,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { panic(fmt.Errorf("error start app: %w", err)) } db := dbm.NewMemDB() - stateStore := sm.NewStore(db) + stateStore := sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { panic(err) @@ -504,14 +506,18 @@ func newReactorStore( stateDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false}, + ) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 32cb709e7..e581cff24 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -40,6 +40,9 @@ replace the backend. The default start-height is 0, meaning the tooling will sta reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omit either or both arguments. + +Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you +want to use this command. `, Example: ` tendermint reindex-event diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index 912e1b389..1270c0ac6 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -77,7 +77,9 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, if err != nil { return nil, nil, err } - stateStore := state.NewStore(stateDB) + stateStore := state.NewStore(stateDB, state.StoreOptions{ + DiscardABCIResponses: config.RPC.DiscardABCIResponses, + }) return blockStore, stateStore, nil } diff --git a/config/config.go b/config/config.go index d052b40ae..d42ae2ba7 100644 --- a/config/config.go +++ b/config/config.go @@ -405,6 +405,11 @@ type RPCConfig struct { // pprof listen address (https://golang.org/pkg/net/http/pprof) PprofListenAddress string `mapstructure:"pprof_laddr"` + + // Set false to ensure ABCI responses are persisted. + // ABCI responses are required for /BlockResults RPC queries, and + // to reindex events in the command-line tool. 
+ DiscardABCIResponses bool `mapstructure:"discard_abci_responses"` } // DefaultRPCConfig returns a default configuration for the RPC server @@ -429,8 +434,9 @@ func DefaultRPCConfig() *RPCConfig { MaxBodyBytes: int64(1000000), // 1MB MaxHeaderBytes: 1 << 20, // same as the net/http default - TLSCertFile: "", - TLSKeyFile: "", + TLSCertFile: "", + TLSKeyFile: "", + DiscardABCIResponses: false, } } @@ -1070,12 +1076,14 @@ func (cfg *ConsensusConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // TxIndexConfig // Remember that Event has the following structure: // type: [ -// key: value, -// ... +// +// key: value, +// ... +// // ] // // CompositeKeys are constructed by `type.key` diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index dd5febcbd..0dd5e43ca 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -50,7 +50,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i := 0; i < nValidators; i++ { logger := consensusLogger().With("test", "byzantine", "validator", i) stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) @@ -446,8 +448,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { case <-done: case <-tick.C: for i, reactor := range reactors { - t.Log(fmt.Sprintf("Consensus Reactor %v", i)) - t.Log(fmt.Sprintf("%v", reactor)) + t.Logf(fmt.Sprintf("Consensus Reactor %v", i)) + t.Logf(fmt.Sprintf("%v", reactor)) } t.Fatalf("Timed out waiting for all validators to commit first block") } diff --git a/consensus/common_test.go b/consensus/common_test.go index 09512f49d..28f5a1266 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -428,7 +428,9 @@ func newStateWithConfigAndBlockStore( // Make State stateDB := blockDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := stateStore.Save(state); err != nil { // for save height 1's validators info panic(err) } @@ -718,7 +720,9 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou configRootDirs := make([]string, 0, nValidators) for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) @@ -756,7 +760,9 @@ func randConsensusNetWithPeers( configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index db9662acb..73b150093 100644 --- 
a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) blockDB := dbm.NewMemDB() - stateStore := sm.NewStore(blockDB) + stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) err := stateStore.Save(state) require.NoError(t, err) @@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, false, 10) app := NewCounterApplication() blockDB := dbm.NewMemDB() - stateStore := sm.NewStore(blockDB) + stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) err := stateStore.Save(state) require.NoError(t, err) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 5d68cd9b7..3e7abc9ac 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -138,7 +138,9 @@ func TestReactorWithEvidence(t *testing.T) { logger := consensusLogger() for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) diff --git a/consensus/replay.go b/consensus/replay.go index 9fd59a40e..bed2a2c4d 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. 
- abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight) if err != nil { return nil, err } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 4bf7466ab..0145dfe92 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo if err != nil { tmos.Exit(err.Error()) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 2970f15ed..d92bb3959 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -159,7 +159,9 @@ LOOP: logger := log.NewNopLogger() blockDB := dbm.NewMemDB() stateDB := blockDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) @@ -290,7 +292,7 @@ func (w *crashingWAL) Start() error { return w.next.Start() } func (w *crashingWAL) Stop() error { return w.next.Stop() } func (w *crashingWAL) Wait() { w.next.Wait() } -//------------------------------------------------------------------------------------------ +// ------------------------------------------------------------------------------------------ type testSim struct { GenesisState sm.State Config *cfg.Config @@ -693,7 +695,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) store.chain = chain store.commits = commits @@ -712,7 +716,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) stateDB1 := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB1) + stateStore := sm.NewStore(stateDB1, sm.StoreOptions{ + DiscardABCIResponses: false, + }) err := stateStore.Save(genesisState) require.NoError(t, err) buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode) @@ -891,7 +897,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, appVersion) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks @@ -1148,7 +1156,9 @@ func stateAndStore( pubKey crypto.PubKey, appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion store := newMockBlockStore(config, state.ConsensusParams) @@ -1225,7 +1235,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { pubKey, err 
:= privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 0x0) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) oldValAddr := state.Validators.Validators[0].Address diff --git a/consensus/state.go b/consensus/state.go index 9e6d980a3..b04e8d192 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -970,7 +970,9 @@ func (cs *State) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) @@ -1055,7 +1057,9 @@ func (cs *State) needProofBlock(height int64) bool { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): -// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool func (cs *State) enterPropose(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 1c449717b..53712bf3d 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisState(genDoc) if err != nil { return fmt.Errorf("failed to make genesis state: %w", err) diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go index 865c30217..fe50b3463 100644 --- a/crypto/merkle/doc.go +++ b/crypto/merkle/doc.go @@ -12,20 +12,19 @@ second pre-image attacks. Hence, use this library with caution. Otherwise you might run into similar issues as, e.g., in early Bitcoin: https://bitcointalk.org/?topic=102395 - * - / \ - / \ - / \ - / \ - * * - / \ / \ - / \ / \ - / \ / \ - * * * h6 - / \ / \ / \ - h0 h1 h2 h3 h4 h5 + * + / \ + / \ + / \ + / \ + * * + / \ / \ + / \ / \ + / \ / \ + * * * h6 + / \ / \ / \ + h0 h1 h2 h3 h4 h5 TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure. - */ package merkle diff --git a/crypto/merkle/tree.go b/crypto/merkle/tree.go index 466c43482..089c2f82e 100644 --- a/crypto/merkle/tree.go +++ b/crypto/merkle/tree.go @@ -47,10 +47,10 @@ func HashFromByteSlices(items [][]byte) []byte { // // These preliminary results suggest: // -// 1. The performance of the HashFromByteSlice is pretty good -// 2. Go has low overhead for recursive functions -// 3. The performance of the HashFromByteSlice routine is dominated -// by the actual hashing of data +// 1. The performance of the HashFromByteSlice is pretty good +// 2. Go has low overhead for recursive functions +// 3. 
The performance of the HashFromByteSlice routine is dominated +// by the actual hashing of data // // Although this work is in no way exhaustive, point #3 suggests that // optimization of this routine would need to take an alternative diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 0fbd9ad2d..8c943f623 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -15,7 +15,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" ) -//------------------------------------- +// ------------------------------------- const ( PrivKeyName = "tendermint/PrivKeySecp256k1" PubKeyName = "tendermint/PubKeySecp256k1" @@ -124,8 +124,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey { // used to reject malleable signatures // see: -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. diff --git a/evidence/doc.go b/evidence/doc.go index d521debd3..42ea1d6bb 100644 --- a/evidence/doc.go +++ b/evidence/doc.go @@ -3,7 +3,7 @@ Package evidence handles all evidence storage and gossiping from detection to bl For the different types of evidence refer to the `evidence.go` file in the types package or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md. -Gossiping +# Gossiping The core functionality begins with the evidence reactor (see reactor. go) which operates both the sending and receiving of evidence. @@ -29,7 +29,7 @@ There are two buckets that evidence can be stored in: Pending & Committed. All evidence is proto encoded to disk. -Proposing +# Proposing When a new block is being proposed (in state/execution.go#CreateProposalBlock), `PendingEvidence(maxBytes)` is called to send up to the maxBytes of uncommitted evidence, from the evidence store, @@ -42,12 +42,11 @@ Once the proposed evidence is submitted, the evidence is marked as committed and is moved from the broadcasted set to the committed set. As a result it is also removed from the concurrent list so that it is no longer gossiped. -Minor Functionality +# Minor Functionality As all evidence (including POLC's) are bounded by an expiration date, those that exceed this are no longer needed and hence pruned. Currently, only committed evidence in which a marker to the height that the evidence was committed and hence very small is saved. All updates are made from the `Update(block, state)` function which should be called when a new block is committed. - */ package evidence diff --git a/evidence/mocks/block_store.go b/evidence/mocks/block_store.go index b0c67ff87..e61c4e0ae 100644 --- a/evidence/mocks/block_store.go +++ b/evidence/mocks/block_store.go @@ -58,13 +58,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } -type NewBlockStoreT interface { +type mockConstructorTestingTNewBlockStore interface { mock.TestingT Cleanup(func()) } // NewBlockStore creates a new instance of BlockStore. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t NewBlockStoreT) *BlockStore { +func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/evidence/pool.go b/evidence/pool.go index dfeb7a717..632adc775 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -97,11 +97,11 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { // Update takes both the new state and the evidence committed at that height and performs // the following operations: -// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form -// DuplicateVoteEvidence and add it to the pool. -// 2. Update the pool's state which contains evidence params relating to expiry. -// 3. Moves pending evidence that has now been committed into the committed pool. -// 4. Removes any expired evidence based on both height and time. +// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form +// DuplicateVoteEvidence and add it to the pool. +// 2. Update the pool's state which contains evidence params relating to expiry. +// 3. Moves pending evidence that has now been committed into the committed pool. +// 4. Removes any expired evidence based on both height and time. func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // sanity check if state.LastBlockHeight <= evpool.state.LastBlockHeight { diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 365a3be88..efa3dea10 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -348,7 +348,9 @@ func TestRecoverPendingEvidence(t *testing.T) { func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store { stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state := sm.State{ ChainID: evidenceChainID, InitialHeight: 1, diff --git a/evidence/verify.go b/evidence/verify.go index f3eba5358..c20cb0a2d 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -102,13 +102,14 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. This involves // the following checks: -// - the common header from the full node has at least 1/3 voting power which is also present in -// the conflicting header's commit -// - 2/3+ of the conflicting validator set correctly signed the conflicting block -// - the nodes trusted header at the same height as the conflicting header has a different hash +// - the common header from the full node has at least 1/3 voting power which is also present in +// the conflicting header's commit +// - 2/3+ of the conflicting validator set correctly signed the conflicting block +// - the nodes trusted header at the same height as the conflicting header has a different hash // // CONTRACT: must run ValidateBasic() on the evidence before verifying -// must check that the evidence has not expired (i.e. is outside the maximum age threshold) +// +// must check that the evidence has not expired (i.e. 
is outside the maximum age threshold) func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { // In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single @@ -154,10 +155,10 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t // VerifyDuplicateVote verifies DuplicateVoteEvidence against the state of full node. This involves the // following checks: -// - the validator is in the validator set at the height of the evidence -// - the height, round, type and validator address of the votes must be the same -// - the block ID's must be different -// - The signatures must both be valid +// - the validator is in the validator set at the height of the evidence +// - the height, round, type and validator address of the votes must be the same +// - the block ID's must be different +// - The signatures must both be valid func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error { _, val := valSet.GetByAddress(e.VoteA.ValidatorAddress) if val == nil { diff --git a/go.sum b/go.sum index 15d806059..4e6ff0fdc 100644 --- a/go.sum +++ b/go.sum @@ -2163,6 +2163,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go index d96ad3f47..706305300 100644 --- a/libs/cli/flags/log_level.go +++ b/libs/cli/flags/log_level.go @@ -17,7 +17,8 @@ const ( // all other modules). // // Example: -// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +// +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { if lvl == "" { return nil, errors.New("empty log level") diff --git a/libs/clist/clist.go b/libs/clist/clist.go index 5579b1d0f..2e4171b1c 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -24,7 +24,6 @@ import ( const MaxLength = int(^uint(0) >> 1) /* - CElement is an element of a linked-list Traversal from a CElement is goroutine-safe. @@ -41,7 +40,6 @@ the for-loop. Use sync.Cond when you need serial access to the "condition". In our case our condition is if `next != nil || removed`, and there's no reason to serialize that condition for goroutines waiting on NextWait() (since it's just a read operation). 
- */ type CElement struct { mtx tmsync.RWMutex diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index d10a1e5ae..ccb50ca83 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -68,6 +68,7 @@ func TestSmall(t *testing.T) { // This test is quite hacky because it relies on SetFinalizer // which isn't guaranteed to run at all. +// //nolint:unused,deadcode func _TestGCFifo(t *testing.T) { if runtime.GOARCH != "amd64" { @@ -117,6 +118,7 @@ func _TestGCFifo(t *testing.T) { // This test is quite hacky because it relies on SetFinalizer // which isn't guaranteed to run at all. +// //nolint:unused,deadcode func _TestGCRandom(t *testing.T) { if runtime.GOARCH != "amd64" { diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go index c7ba93282..fdc168d18 100644 --- a/libs/flowrate/flowrate.go +++ b/libs/flowrate/flowrate.go @@ -39,10 +39,10 @@ type Monitor struct { // weight of each sample in the exponential moving average (EMA) calculation. // The exact formulas are: // -// sampleTime = currentTime - prevSampleTime -// sampleRate = byteCount / sampleTime -// weight = 1 - exp(-sampleTime/windowSize) -// newRate = weight*sampleRate + (1-weight)*oldRate +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate // // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, // respectively. diff --git a/libs/json/doc.go b/libs/json/doc.go index d5ef4047f..1b92c0db6 100644 --- a/libs/json/doc.go +++ b/libs/json/doc.go @@ -13,12 +13,12 @@ // compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit // precision): // -// int32(32) // Output: 32 -// uint32(32) // Output: 32 -// int64(64) // Output: "64" -// uint64(64) // Output: "64" -// int(64) // Output: "64" -// uint(64) // Output: "64" +// int32(32) // Output: 32 +// uint32(32) // Output: 32 +// int64(64) // Output: "64" +// uint64(64) // Output: "64" +// int(64) // Output: "64" +// uint(64) // Output: "64" // // Encoding of other scalars follows encoding/json: // @@ -50,7 +50,7 @@ // Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero // times emitted as "0001-01-01T00:00:00Z" as with encoding/json): // -// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) +// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) // // Output: "2020-06-08T14:21:28.000000123Z" // time.Time{} // Output: "0001-01-01T00:00:00Z" // (*time.Time)(nil) // Output: null @@ -95,5 +95,4 @@ // // Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} // // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} -// package json diff --git a/libs/log/filter.go b/libs/log/filter.go index e39a85dcb..4b7ed981c 100644 --- a/libs/log/filter.go +++ b/libs/log/filter.go @@ -69,18 +69,19 @@ func (l *filter) Error(msg string, keyvals ...interface{}) { // Allow*With methods, it is used as the logger's level. // // Examples: -// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) -// logger.With("module", "crypto").Info("Hello") # produces "I... 
Hello module=crypto" // -// logger = log.NewFilter(logger, log.AllowError(), -// log.AllowInfoWith("module", "crypto"), -// log.AllowNoneWith("user", "Sam")) -// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) +// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" // -// logger = log.NewFilter(logger, -// log.AllowError(), -// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) -// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" +// logger = log.NewFilter(logger, log.AllowError(), +// log.AllowInfoWith("module", "crypto"), +// log.AllowNoneWith("user", "Sam")) +// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// +// logger = log.NewFilter(logger, +// log.AllowError(), +// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" func (l *filter) With(keyvals ...interface{}) Logger { keyInAllowedKeyvals := false diff --git a/libs/log/logger.go b/libs/log/logger.go index 9b1a65d42..34aca8af5 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -22,9 +22,9 @@ type Logger interface { // // If w implements the following interface, so does the returned writer. // -// interface { -// Fd() uintptr -// } +// interface { +// Fd() uintptr +// } func NewSyncWriter(w io.Writer) io.Writer { return kitlog.NewSyncWriter(w) } diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index 4b82455f1..815cfddc5 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -83,7 +83,7 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log } } -//nolint: errcheck // ignore errors +// nolint: errcheck // ignore errors var ( baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 914a080de..321e775c8 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -12,26 +12,25 @@ // // Example: // -// q, err := query.New("account.name='John'") -// if err != nil { -// return err -// } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) -// if err != nil { -// return err -// } -// -// for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Cancelled(): -// return subscription.Err() -// } -// } +// q, err := query.New("account.name='John'") +// if err != nil { +// return err +// } +// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) +// defer cancel() +// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) +// if err != nil { +// return err +// } // +// for { +// select { +// case msg <- subscription.Out(): +// // handle msg.Data() and msg.Events() +// case <-subscription.Cancelled(): +// return subscription.Err() +// } +// } package pubsub import ( diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index cf6903ccf..35023d565 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -1,6 +1,6 @@ // Package query provides 
a parser for a custom query format: // -// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// abci.invoice.number=22 AND abci.invoice.owner=Ivan // // See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. // More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 8f90e177a..4526df51e 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -54,7 +54,8 @@ func (s *Subscription) Cancelled() <-chan struct{} { // If the channel is closed, Err returns a non-nil error explaining why: // - ErrUnsubscribed if the subscriber choose to unsubscribe, // - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, +// and the channel returned by Out became full, +// // After Err returns a non-nil error, successive calls to Err return the same // error. func (s *Subscription) Err() error { diff --git a/light/client.go b/light/client.go index 51330c77c..8d478e69c 100644 --- a/light/client.go +++ b/light/client.go @@ -284,16 +284,16 @@ func (c *Client) restoreTrustedLightBlock() error { // if options.Height: // -// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as +// 1. ahead of trustedLightBlock.Height => fetch light blocks (same height as // trustedLightBlock) from primary provider and check it's hash matches the // trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks // before) // -// 2) equals trustedLightBlock.Height => check options.Hash matches the +// 2. equals trustedLightBlock.Height => check options.Hash matches the // trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks // before) // -// 3) behind trustedLightBlock.Height => remove all the light blocks between +// 3. behind trustedLightBlock.Height => remove all the light blocks between // options.Height and trustedLightBlock.Height, update trustedLightBlock, then // check options.Hash matches the trustedLightBlock's hash (if not, remove // trustedLightBlock and all the light blocks before) @@ -395,10 +395,10 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp // TrustedLightBlock returns a trusted light block at the given height (0 - the latest). // // It returns an error if: -// - there are some issues with the trusted store, although that should not -// happen normally; -// - negative height is passed; -// - header has not been verified yet and is therefore not in the store +// - there are some issues with the trusted store, although that should not +// happen normally; +// - negative height is passed; +// - header has not been verified yet and is therefore not in the store // // Safe for concurrent use by multiple goroutines. func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) { @@ -510,8 +510,9 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // // If the header, which is older than the currently trusted header, is // requested and the light client does not have it, VerifyHeader will perform: -// a) verifySkipping verification if nearest trusted header is found & not expired -// b) backwards verification in all other cases +// +// a) verifySkipping verification if nearest trusted header is found & not expired +// b) backwards verification in all other cases // // It returns ErrOldHeaderExpired if the latest trusted header expired. 
// @@ -980,12 +981,12 @@ func (c *Client) backwards( // lightBlockFromPrimary retrieves the lightBlock from the primary provider // at the specified height. This method also handles provider behavior as follows: // -// 1. If the provider does not respond or does not have the block, it tries again -// with a different provider -// 2. If all providers return the same error, the light client forwards the error to -// where the initial request came from -// 3. If the provider provides an invalid light block, is deemed unreliable or returns -// any other error, the primary is permanently dropped and is replaced by a witness. +// 1. If the provider does not respond or does not have the block, it tries again +// with a different provider +// 2. If all providers return the same error, the light client forwards the error to +// where the initial request came from +// 3. If the provider provides an invalid light block, is deemed unreliable or returns +// any other error, the primary is permanently dropped and is replaced by a witness. func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) { c.providerMutex.Lock() l, err := c.primary.LightBlock(ctx, height) diff --git a/light/detector.go b/light/detector.go index 881242135..1fd21f41e 100644 --- a/light/detector.go +++ b/light/detector.go @@ -109,7 +109,9 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig // // 1: errConflictingHeaders -> there may have been an attack on this light client // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one -// Note: In the case of an invalid header we remove the witness +// +// Note: In the case of an invalid header we remove the witness +// // 3: nil -> the hashes of the two headers match func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, witness provider.Provider, witnessIndex int) { @@ -275,16 +277,16 @@ func (c *Client) handleConflictingHeaders( // it has received from another and preforms verifySkipping at the heights of each of the intermediate // headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen. // -// 1. The light client verifies a header that is different to the intermediate header in the trace. This -// is the bifurcation point and the light client can create evidence from it -// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we -// return the error and remove the witness +// 1. The light client verifies a header that is different to the intermediate header in the trace. This +// is the bifurcation point and the light client can create evidence from it +// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we +// return the error and remove the witness // // CONTRACT: -// 1. Trace can not be empty len(trace) > 0 -// 2. The last block in the trace can not be of a lower height than the target block -// trace[len(trace)-1].Height >= targetBlock.Height -// 3. The +// 1. Trace can not be empty len(trace) > 0 +// 2. The last block in the trace can not be of a lower height than the target block +// trace[len(trace)-1].Height >= targetBlock.Height +// 3. 
The func (c *Client) examineConflictingHeaderAgainstTrace( ctx context.Context, trace []*types.LightBlock, diff --git a/light/doc.go b/light/doc.go index 700bbeb6c..fc50df2fa 100644 --- a/light/doc.go +++ b/light/doc.go @@ -63,31 +63,31 @@ This package provides three major things: Example usage: - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) - if err != nil { - // handle error - } + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + if err != nil { + // handle error + } - c, err := NewHTTPClient( - chainID, - TrustOptions{ - Period: 504 * time.Hour, // 21 days - Height: 100, - Hash: header.Hash(), - }, - "http://localhost:26657", - []string{"http://witness1:26657"}, - dbs.New(db, ""), - ) - if err != nil { - // handle error - } + c, err := NewHTTPClient( + chainID, + TrustOptions{ + Period: 504 * time.Hour, // 21 days + Height: 100, + Hash: header.Hash(), + }, + "http://localhost:26657", + []string{"http://witness1:26657"}, + dbs.New(db, ""), + ) + if err != nil { + // handle error + } - h, err := c.TrustedHeader(100) - if err != nil { - // handle error - } - fmt.Println("header", h) + h, err := c.TrustedHeader(100) + if err != nil { + // handle error + } + fmt.Println("header", h) Check out other examples in example_test.go diff --git a/light/rpc/client.go b/light/rpc/client.go index 6fc1adbca..4a845a5da 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -27,6 +27,7 @@ var errNegOrZeroHeight = errors.New("negative or zero height") type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error) // LightClient is an interface that contains functionality needed by Client from the light client. +// //go:generate ../../scripts/mockery_generate.sh LightClient type LightClient interface { ChainID() string diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 25e101c86..fabf73b01 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -100,13 +100,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } -type NewLightClientT interface { +type mockConstructorTestingTNewLightClient interface { mock.TestingT Cleanup(func()) } // NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLightClient(t NewLightClientT) *LightClient { +func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient { mock := &LightClient{} mock.Mock.Test(t) diff --git a/light/verifier.go b/light/verifier.go index 0b0a4926b..2ec02e877 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -19,13 +19,13 @@ var ( // VerifyNonAdjacent verifies non-adjacent untrustedHeader against // trustedHeader. It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) -// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) -// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals) -// signed correctly (if not, ErrNewValSetCantBeTrusted is returned) -// d) more than 2/3 of untrustedVals have signed h2 -// (otherwise, ErrInvalidHeader is returned) -// e) headers are non-adjacent. 
+// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) +// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals) +// signed correctly (if not, ErrNewValSetCantBeTrusted is returned) +// d) more than 2/3 of untrustedVals have signed h2 +// (otherwise, ErrInvalidHeader is returned) +// e) headers are non-adjacent. // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. @@ -81,12 +81,12 @@ func VerifyNonAdjacent( // VerifyAdjacent verifies directly adjacent untrustedHeader against // trustedHeader. It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) -// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) -// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash -// d) more than 2/3 of new validators (untrustedVals) have signed h2 -// (otherwise, ErrInvalidHeader is returned) -// e) headers are adjacent. +// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) +// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash +// d) more than 2/3 of new validators (untrustedVals) have signed h2 +// (otherwise, ErrInvalidHeader is returned) +// e) headers are adjacent. // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. @@ -212,12 +212,12 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time // VerifyBackwards verifies an untrusted header with a height one less than // that of an adjacent trusted header. It ensures that: // -// a) untrusted header is valid -// b) untrusted header has a time before the trusted header -// c) that the LastBlockID hash of the trusted header is the same as the hash -// of the trusted header +// a) untrusted header is valid +// b) untrusted header has a time before the trusted header +// c) that the LastBlockID hash of the trusted header is the same as the hash +// of the trusted header // -// For any of these cases ErrInvalidHeader is returned. +// For any of these cases ErrInvalidHeader is returned. func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { if err := untrustedHeader.ValidateBasic(); err != nil { return ErrInvalidHeader{err} diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index 4695eed17..516a57f22 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -194,7 +194,9 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} { // It blocks if we're waiting on Update() or Reap(). // cb: A callback from the CheckTx command. -// It gets called from another goroutine. +// +// It gets called from another goroutine. +// // CONTRACT: Either cb will get called, or err returned. // // Safe for concurrent use by multiple goroutines. 
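The CheckTx contract documented in the hunk above (either the call returns an error, or the callback fires later from another goroutine) is easiest to see from the caller's side. The sketch below is not part of this patch; it assumes the v0.34-line mempool.Mempool interface, CheckTx(tx, callback, TxInfo) error, and the helper name checkTxAsync is purely illustrative.

package example

import (
	"log"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// checkTxAsync submits a transaction and reports rejections via the callback.
// Per the contract above, exactly one of two things happens: CheckTx returns a
// non-nil error here, or cb is invoked once the ABCI application has responded.
// (Illustrative helper; assumes the v0.34-line interface, not defined by this patch.)
func checkTxAsync(mp mempool.Mempool, tx types.Tx) error {
	return mp.CheckTx(tx, func(res *abci.Response) {
		if r := res.GetCheckTx(); r != nil && r.Code != abci.CodeTypeOK {
			log.Printf("tx rejected: code=%d, log=%q", r.Code, r.Log)
		}
	}, mempool.TxInfo{})
}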
@@ -310,7 +312,7 @@ func (mem *CListMempool) reqResCb( } // Called from: -// - resCbFirstTime (lock not held) if tx is valid +// - resCbFirstTime (lock not held) if tx is valid func (mem *CListMempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) mem.txsMap.Store(memTx.tx.Key(), e) @@ -319,8 +321,8 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) { } // Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() diff --git a/node/doc.go b/node/doc.go index 08f3fa258..3a145c573 100644 --- a/node/doc.go +++ b/node/doc.go @@ -6,35 +6,34 @@ Adding new p2p.Reactor(s) To add a new p2p.Reactor, use the CustomReactors option: - node, err := NewNode( - config, - privVal, - nodeKey, - clientCreator, - genesisDocProvider, - dbProvider, - metricsProvider, - logger, - CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}), - ) + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}), + ) Replacing existing p2p.Reactor(s) To replace the built-in p2p.Reactor, use the CustomReactors option: - node, err := NewNode( - config, - privVal, - nodeKey, - clientCreator, - genesisDocProvider, - dbProvider, - metricsProvider, - logger, - CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}), - ) + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}), + ) The list of existing reactors can be found in CustomReactors documentation. - */ package node diff --git a/node/node.go b/node/node.go index 84ed26748..d5042a55a 100644 --- a/node/node.go +++ b/node/node.go @@ -69,6 +69,8 @@ type DBContext struct { // DBProvider takes a DBContext and returns an instantiated DB. type DBProvider func(*DBContext) (dbm.DB, error) +const readHeaderTimeout = 10 * time.Second + // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { @@ -144,12 +146,12 @@ type fastSyncReactor interface { // WARNING: using any name from the below list of the existing reactors will // result in replacing it with the custom one. 
// -// - MEMPOOL -// - BLOCKCHAIN -// - CONSENSUS -// - EVIDENCE -// - PEX -// - STATESYNC +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -429,7 +431,9 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, return nil, nil, err } evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }), blockStore) if err != nil { return nil, nil, err } @@ -715,7 +719,9 @@ func NewNode(config *cfg.Config, return nil, err } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { @@ -1220,6 +1226,7 @@ func (n *Node) startPrometheusServer(addr string) *http.Server { promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, ), ), + ReadHeaderTimeout: readHeaderTimeout, } go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { @@ -1402,7 +1409,9 @@ func LoadStateFromDBOrGenesisDocProvider( return sm.State{}, nil, err } } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err diff --git a/node/node_test.go b/node/node_test.go index 9b82d6ca2..9b93b6fd6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -235,7 +235,9 @@ func TestCreateProposalBlock(t *testing.T) { var height int64 = 1 state, stateDB, privVals := state(1, height) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) maxBytes := 16384 var partSize uint32 = 256 maxEvidenceBytes := int64(maxBytes / 2) @@ -340,7 +342,9 @@ func TestMaxProposalBlockSize(t *testing.T) { var height int64 = 1 state, stateDB, _ := state(1, height) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) var maxBytes int64 = 16384 var partSize uint32 = 256 state.ConsensusParams.Block.MaxBytes = maxBytes @@ -464,7 +468,9 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := stateStore.Save(s); err != nil { panic(err) } diff --git a/p2p/conn/conn_notgo110.go b/p2p/conn/conn_notgo110.go index 21dffad2c..96f9a2a7e 100644 --- a/p2p/conn/conn_notgo110.go +++ b/p2p/conn/conn_notgo110.go @@ -10,9 +10,13 @@ import ( // Only Go1.10 has a proper net.Conn implementation that // has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// +// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// // lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 +// +// https://github.com/tendermint/tendermint/issues/851 +// // so for go versions < Go1.10 use our custom net.Conn creator // that doesn't return an `Unimplemented 
error` for net.Conn. // Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04 diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 44ff83893..318297a67 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -62,6 +62,7 @@ The byte id and the relative priorities of each `Channel` are configured upon initialization of the connection. There are two methods for sending messages: + func (m MConnection) Send(chID byte, msgBytes []byte) bool {} func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index f739a0b21..e195c78bb 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -330,13 +330,13 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { return r0 } -type NewPeerT interface { +type mockConstructorTestingTNewPeer interface { mock.TestingT Cleanup(func()) } // NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeer(t NewPeerT) *Peer { +func NewPeer(t mockConstructorTestingTNewPeer) *Peer { mock := &Peer{} mock.Mock.Test(t) diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go index e98a9e97e..33763c084 100644 --- a/p2p/pex/known_address.go +++ b/p2p/pex/known_address.go @@ -94,17 +94,16 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int { } /* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: +An address is bad if the address in question is a New address, has not been tried in the last +minute, and meets one of the following criteria: - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. +1) It claims to be from the future +2) It hasn't been seen in over a week +3) It has failed at least three times and never succeeded +4) It has failed ten times in the last week +All addresses that meet these criteria are assumed to be worthless and not +worth keeping hold of. */ func (ka *knownAddress) isBad() bool { // Is Old --> good diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 4ed1254ef..1e859e7be 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -59,15 +59,15 @@ func TestPEXReactorAddRemovePeer(t *testing.T) { } // --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) +// +// pex_reactor_test.go:411: expected all switches to be connected to at +// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => +// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) // // EXPLANATION: peers are getting rejected because in switch#addPeer we check // if any peer (who we already connected to) has the same IP. Even though local // peers have different IP addresses, they all have the same underlying remote // IP: 127.0.0.1. 
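For single-host setups where every peer resolves to 127.0.0.1, the usual workaround for the same-IP rejection described above is to relax the duplicate-IP check in the P2P configuration. A hedged sketch, assuming the pre-existing AllowDuplicateIP field on config.P2PConfig (not introduced by this patch; verify against the config package in this tree).

	// Illustrative only: relaxing the duplicate-IP check for single-host testnets.
	// Assumes config.P2PConfig exposes AllowDuplicateIP (a pre-existing option).
	package example

	import cfg "github.com/tendermint/tendermint/config"

	// localTestnetConfig is a hypothetical helper for running several nodes on
	// one machine, where every peer shares the remote IP 127.0.0.1.
	func localTestnetConfig() *cfg.Config {
		c := cfg.DefaultConfig()
		// Without this, Switch#addPeer rejects additional peers whose remote IP
		// matches an already-connected peer, as explained in the comment above.
		c.P2P.AllowDuplicateIP = true
		return c
	}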
-// func TestPEXReactorRunning(t *testing.T) { N := 3 switches := make([]*p2p.Switch, N) diff --git a/p2p/switch.go b/p2p/switch.go index fa87cbccd..3214de223 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -379,8 +379,8 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { // to the PEX/Addrbook to find the peer with the addr again // NOTE: this will keep trying even if the handshake or auth fails. // TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already +// - ie. if we're getting ErrDuplicatePeer we can stop +// because the addrbook got us the peer back already func (sw *Switch) reconnectToPeer(addr *NetAddress) { if sw.reconnecting.Has(string(addr.ID)) { return diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 7638de4cb..cfd927c9f 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -79,8 +79,8 @@ func TestTransportMultiplexConnFilter(t *testing.T) { } _, err = mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { + if e, ok := err.(ErrRejected); ok { + if !e.IsFiltered() { t.Errorf("expected peer to be filtered, got %v", err) } } else { @@ -386,8 +386,8 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { } _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsNodeInfoInvalid() { + if e, ok := err.(ErrRejected); ok { + if !e.IsNodeInfoInvalid() { t.Errorf("expected NodeInfo to be invalid, got %v", err) } } else { @@ -425,8 +425,8 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { } _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsAuthFailure() { + if e, ok := err.(ErrRejected); ok { + if !e.IsAuthFailure() { t.Errorf("expected auth failure, got %v", err) } } else { @@ -453,8 +453,8 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { t.Logf("connection failed: %v", err) - if err, ok := err.(ErrRejected); ok { - if !err.IsAuthFailure() { + if e, ok := err.(ErrRejected); ok { + if !e.IsAuthFailure() { t.Errorf("expected auth failure, got %v", err) } } else { @@ -490,8 +490,8 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { }() _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsIncompatible() { + if e, ok := err.(ErrRejected); ok { + if !e.IsIncompatible() { t.Errorf("expected to reject incompatible, got %v", err) } } else { @@ -517,8 +517,8 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { }() if err := <-errc; err != nil { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { + if e, ok := err.(ErrRejected); ok { + if !e.IsSelf() { t.Errorf("expected to reject self, got: %v", err) } } else { diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go index 65caf38a2..c3adfd5d1 100644 --- a/p2p/trust/metric_test.go +++ b/p2p/trust/metric_test.go @@ -72,6 +72,7 @@ func TestTrustMetricCopyNilPointer(t *testing.T) { } // XXX: This test fails non-deterministically +// //nolint:unused,deadcode func _TestTrustMetricStopPause(t *testing.T) { // The TestTicker will provide manual control over diff --git a/privval/doc.go b/privval/doc.go index 7695ffe9d..63e1d071d 100644 --- a/privval/doc.go +++ b/privval/doc.go @@ -1,13 +1,12 @@ /* - Package privval provides different implementations of the types.PrivValidator. 
-FilePV +# FilePV FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state. -SignerListenerEndpoint +# SignerListenerEndpoint SignerListenerEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. @@ -15,15 +14,14 @@ SignerListenerEndpoint listens for the external KMS process to dial in. SignerListenerEndpoint takes a listener, which determines the type of connection (ie. encrypted over tcp, or unencrypted over unix). -SignerDialerEndpoint +# SignerDialerEndpoint SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. -SignerClient +# SignerClient SignerClient handles remote validator connections that provide signing services. In production, it's recommended to wrap it with RetrySignerClient to avoid termination in case of temporary errors. - */ package privval diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 23a6ec2e3..340800f46 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -75,10 +75,10 @@ message RequestQuery { } message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; } enum CheckTxType { @@ -234,7 +234,7 @@ message ResponseDeliverTx { } message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; + repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; ConsensusParams consensus_param_updates = 2; repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 6372a88d4..98550a3b2 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -104,7 +104,7 @@ func (m *NewRoundStep) GetLastCommitRound() int32 { } // NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlock struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto index 6e1f41371..5048f8545 100644 --- a/proto/tendermint/consensus/types.proto +++ b/proto/tendermint/consensus/types.proto @@ -18,7 +18,7 @@ message NewRoundStep { } // NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. 
message NewValidBlock { int64 height = 1; diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada..6b57ca1ae 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -199,6 +199,58 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { return 0 } +type ABCIResponsesInfo struct { + AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } +func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } +func (*ABCIResponsesInfo) ProtoMessage() {} +func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{3} +} +func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponsesInfo.Merge(m, src) +} +func (m *ABCIResponsesInfo) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponsesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo + +func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses { + if m != nil { + return m.AbciResponses + } + return nil +} + +func (m *ABCIResponsesInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + type Version struct { Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` @@ -208,7 +260,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{3} + return fileDescriptor_ccfacf933f22bf93, []int{4} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -284,7 +336,7 @@ func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{4} + return fileDescriptor_ccfacf933f22bf93, []int{5} } func (m *State) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,6 +467,7 @@ func init() { proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses") proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo") proto.RegisterType((*Version)(nil), "tendermint.state.Version") proto.RegisterType((*State)(nil), "tendermint.state.State") } @@ -422,55 +475,58 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 763 bytes 
of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34, - 0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03, - 0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff, - 0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10, - 0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c, - 0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08, - 0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d, - 0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84, - 0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9, - 0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70, - 0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43, - 0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0, - 0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5, - 0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80, - 0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd, - 0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb, - 0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba, - 0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89, - 0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6, - 0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e, - 0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f, - 0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c, - 0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69, - 0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41, - 0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8, - 0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7, - 0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6, - 0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71, - 0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07, - 0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89, - 0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc, - 0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56, - 0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9, - 0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9, - 0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 
0x35, - 0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57, - 0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83, - 0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd, - 0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f, - 0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72, - 0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8, - 0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07, - 0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab, - 0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e, - 0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4, - 0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00, + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44, + 0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1, + 0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c, + 0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0, + 0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d, + 0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46, + 0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93, + 0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c, + 0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8, + 0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88, + 0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41, + 0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57, + 0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84, + 0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62, + 0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82, + 0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0, + 0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42, + 0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac, + 0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29, + 0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65, + 0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2, + 0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85, + 0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd, + 0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e, + 
0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9, + 0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6, + 0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 0x21, 0x75, 0xa8, 0xfc, + 0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85, + 0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd, + 0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8, + 0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7, + 0xd0, 0x5a, 0x44, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2, + 0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b, + 0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9, + 0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0, + 0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30, + 0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f, + 0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b, + 0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12, + 0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8, + 0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3, + 0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2, + 0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1, + 0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42, + 0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93, + 0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a, + 0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf, + 0x5b, 0xdb, 0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57, + 0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba, + 0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9, + 0x2e, 0xa2, 0x07, 0x00, 0x00, } func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { @@ -612,6 +668,46 @@ func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.AbciResponses != nil { + { + size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Version) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -747,12 +843,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err10 != nil { - return 0, err10 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err11 != nil { + return 0, err11 } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) i-- dAtA[i] = 0x2a { @@ -854,6 +950,22 @@ func (m *ConsensusParamsInfo) Size() (n int) { return n } +func (m *ABCIResponsesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AbciResponses != nil { + l = m.AbciResponses.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + func (m *Version) Size() (n int) { if m == nil { return 0 @@ -1291,6 +1403,111 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AbciResponses == nil { + m.AbciResponses = &ABCIResponses{} + } + if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Version) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git 
a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 919da91e5..f3fdc0ef3 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -32,6 +32,11 @@ message ConsensusParamsInfo { int64 last_height_changed = 2; } +message ABCIResponsesInfo { + ABCIResponses abci_responses = 1; + int64 height = 2; +} + message Version { tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false]; string software = 2; diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto index 3b234571b..451b8dca3 100644 --- a/proto/tendermint/types/evidence.proto +++ b/proto/tendermint/types/evidence.proto @@ -17,20 +17,20 @@ message Evidence { // DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. message DuplicateVoteEvidence { - tendermint.types.Vote vote_a = 1; - tendermint.types.Vote vote_b = 2; - int64 total_voting_power = 3; - int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + tendermint.types.Vote vote_a = 1; + tendermint.types.Vote vote_b = 2; + int64 total_voting_power = 3; + int64 validator_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. message LightClientAttackEvidence { - tendermint.types.LightBlock conflicting_block = 1; - int64 common_height = 2; + tendermint.types.LightBlock conflicting_block = 1; + int64 common_height = 2; repeated tendermint.types.Validator byzantine_validators = 3; - int64 total_voting_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + int64 total_voting_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } message EvidenceList { diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 7f7ea74ca..8d4f00972 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -106,10 +106,10 @@ message Vote { // Commit contains the evidence that a block was committed by a set of validators. message Commit { - int64 height = 1; - int32 round = 2; - BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; - repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; } // CommitSig is a part of the Vote included in a Commit. diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index 5acd8cac9..d70cd7cc1 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -141,13 +141,13 @@ func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } -type NewAppConnConsensusT interface { +type mockConstructorTestingTNewAppConnConsensus interface { mock.TestingT Cleanup(func()) } // NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAppConnConsensus(t NewAppConnConsensusT) *AppConnConsensus { +func NewAppConnConsensus(t mockConstructorTestingTNewAppConnConsensus) *AppConnConsensus { mock := &AppConnConsensus{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 87128ce85..05e23dd43 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -102,13 +102,13 @@ func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } -type NewAppConnMempoolT interface { +type mockConstructorTestingTNewAppConnMempool interface { mock.TestingT Cleanup(func()) } // NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAppConnMempool(t NewAppConnMempoolT) *AppConnMempool { +func NewAppConnMempool(t mockConstructorTestingTNewAppConnMempool) *AppConnMempool { mock := &AppConnMempool{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 6560ca0b5..544ab765e 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -96,13 +96,13 @@ func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, return r0, r1 } -type NewAppConnQueryT interface { +type mockConstructorTestingTNewAppConnQuery interface { mock.TestingT Cleanup(func()) } // NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAppConnQuery(t NewAppConnQueryT) *AppConnQuery { +func NewAppConnQuery(t mockConstructorTestingTNewAppConnQuery) *AppConnQuery { mock := &AppConnQuery{} mock.Mock.Test(t) diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index a90cd798a..e3d5cb6cd 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -119,13 +119,13 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*t return r0, r1 } -type NewAppConnSnapshotT interface { +type mockConstructorTestingTNewAppConnSnapshot interface { mock.TestingT Cleanup(func()) } // NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAppConnSnapshot(t NewAppConnSnapshotT) *AppConnSnapshot { +func NewAppConnSnapshot(t mockConstructorTestingTNewAppConnSnapshot) *AppConnSnapshot { mock := &AppConnSnapshot{} mock.Mock.Test(t) diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go index 759b17957..eced0aeff 100644 --- a/proxy/mocks/client_creator.go +++ b/proxy/mocks/client_creator.go @@ -35,13 +35,13 @@ func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { return r0, r1 } -type NewClientCreatorT interface { +type mockConstructorTestingTNewClientCreator interface { mock.TestingT Cleanup(func()) } // NewClientCreator creates a new instance of ClientCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClientCreator(t NewClientCreatorT) *ClientCreator { +func NewClientCreator(t mockConstructorTestingTNewClientCreator) *ClientCreator { mock := &ClientCreator{} mock.Mock.Test(t) diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 64c3cf727..4fec87f2f 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -39,24 +39,24 @@ the example for more details. 
Example: - c, err := New("http://192.168.1.10:26657", "/websocket") - if err != nil { - // handle error - } + c, err := New("http://192.168.1.10:26657", "/websocket") + if err != nil { + // handle error + } - // call Start/Stop if you're subscribing to events - err = c.Start() - if err != nil { - // handle error - } - defer c.Stop() + // call Start/Stop if you're subscribing to events + err = c.Start() + if err != nil { + // handle error + } + defer c.Stop() - res, err := c.Status() - if err != nil { - // handle error - } + res, err := c.Status() + if err != nil { + // handle error + } - // handle result + // handle result */ type HTTP struct { remote string diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index ed911ec20..ec3a358cd 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -47,7 +47,6 @@ var _ client.Client = Client{} // Call is used by recorders to save a call and response. // It can also be used to configure mock responses. -// type Call struct { Name string Args interface{} diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 161978d7b..80d0643d2 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -81,7 +81,9 @@ func TestBlockResults(t *testing.T) { } env = &Environment{} - env.StateStore = sm.NewStore(dbm.NewMemDB()) + env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) err := env.StateStore.SaveABCIResponses(100, results) require.NoError(t, err) env.BlockStore = mockBlockStore{height: 100} diff --git a/rpc/core/env.go b/rpc/core/env.go index 11a51bfe7..e92319937 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -69,7 +69,7 @@ type peers interface { Peers() p2p.IPeerSet } -//---------------------------------------------- +// ---------------------------------------------- // Environment contains objects and interfaces used by the RPC. It is expected // to be setup once during startup. type Environment struct { diff --git a/rpc/jsonrpc/doc.go b/rpc/jsonrpc/doc.go index b014fe38d..eddee24ca 100644 --- a/rpc/jsonrpc/doc.go +++ b/rpc/jsonrpc/doc.go @@ -1,7 +1,7 @@ // HTTP RPC server supporting calls via uri params, jsonrpc over HTTP, and jsonrpc over // websockets // -// Client Requests +// # Client Requests // // Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. // @@ -9,12 +9,12 @@ // // As a GET request, it would have URI encoded parameters, and look like: // -// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' +// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' // // Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. // This should also work: // -// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 +// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 // // A GET request to `/` returns a list of available endpoints. // For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. @@ -23,20 +23,19 @@ // // As a POST request, we use JSONRPC. 
For instance, the same request would have this as the body: // -// { -// "jsonrpc": "2.0", -// "id": "anything", -// "method": "hello_world", -// "params": { -// "name": "my_world", -// "num": 5 -// } -// } +// { +// "jsonrpc": "2.0", +// "id": "anything", +// "method": "hello_world", +// "params": { +// "name": "my_world", +// "num": 5 +// } +// } // // With the above saved in file `data.json`, we can make the request with // -// curl --data @data.json http://localhost:8008 -// +// curl --data @data.json http://localhost:8008 // // WebSocket (JSONRPC) // @@ -44,42 +43,42 @@ // Websocket connections are available at their own endpoint, typically `/websocket`, // though this is configurable when starting the server. // -// Server Definition +// # Server Definition // // Define some types and routes: // -// type ResultStatus struct { -// Value string -// } +// type ResultStatus struct { +// Value string +// } // // Define some routes // -// var Routes = map[string]*rpcserver.RPCFunc{ -// "status": rpcserver.NewRPCFunc(Status, "arg"), -// } +// var Routes = map[string]*rpcserver.RPCFunc{ +// "status": rpcserver.NewRPCFunc(Status, "arg"), +// } // // An rpc function: // -// func Status(v string) (*ResultStatus, error) { -// return &ResultStatus{v}, nil -// } +// func Status(v string) (*ResultStatus, error) { +// return &ResultStatus{v}, nil +// } // // Now start the server: // -// mux := http.NewServeMux() -// rpcserver.RegisterRPCFuncs(mux, Routes) -// wm := rpcserver.NewWebsocketManager(Routes) -// mux.HandleFunc("/websocket", wm.WebsocketHandler) -// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{}) -// if err != nil { panic(err) } -// go rpcserver.Serve(listener, mux, logger) +// mux := http.NewServeMux() +// rpcserver.RegisterRPCFuncs(mux, Routes) +// wm := rpcserver.NewWebsocketManager(Routes) +// mux.HandleFunc("/websocket", wm.WebsocketHandler) +// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{}) +// if err != nil { panic(err) } +// go rpcserver.Serve(listener, mux, logger) // // Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) // Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. // Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. // -// Examples +// # Examples // // - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) package jsonrpc diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 28dfcbf8a..8724ab8e4 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -176,8 +176,9 @@ func arrayParamsToArgs( // array. 
// // Example: -// rpcFunc.args = [rpctypes.Context string] -// rpcFunc.argNames = ["arg"] +// +// rpcFunc.args = [rpctypes.Context string] +// rpcFunc.argNames = ["arg"] func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { const argsOffset = 1 diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index f653e6cc6..6eaa0ab93 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -53,10 +53,11 @@ func DefaultConfig() *Config { func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { logger.Info("serve", "msg", log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr())) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.Serve(listener) logger.Info("RPC HTTP server stopped", "err", err) @@ -78,10 +79,11 @@ func ServeTLS( logger.Info("serve tls", "msg", log.NewLazySprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.ServeTLS(listener, certFile, keyFile) diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go index ca7dd3de9..33eb0a6c9 100644 --- a/rpc/jsonrpc/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -215,15 +215,17 @@ func (resp RPCResponse) String() string { } // From the JSON-RPC 2.0 spec: +// // If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. +// error/Invalid Request), it MUST be Null. func RPCParseError(err error) RPCResponse { return NewRPCErrorResponse(nil, -32700, "Parse error. Invalid JSON", err.Error()) } // From the JSON-RPC 2.0 spec: +// // If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. +// error/Invalid Request), it MUST be Null. func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) } @@ -276,9 +278,12 @@ type Context struct { // RemoteAddr returns the remote address (usually a string "IP:port"). // If neither HTTPReq nor WSConn is set, an empty string is returned. // HTTP: -// http.Request#RemoteAddr +// +// http.Request#RemoteAddr +// // WS: -// result of GetRemoteAddr +// +// result of GetRemoteAddr func (ctx *Context) RemoteAddr() string { if ctx.HTTPReq != nil { return ctx.HTTPReq.RemoteAddr @@ -291,10 +296,13 @@ func (ctx *Context) RemoteAddr() string { // Context returns the request's context. // The returned context is always non-nil; it defaults to the background context. 
// HTTP: -// The context is canceled when the client's connection closes, the request -// is canceled (with HTTP/2), or when the ServeHTTP method returns. +// +// The context is canceled when the client's connection closes, the request +// is canceled (with HTTP/2), or when the ServeHTTP method returns. +// // WS: -// The context is canceled when the client's connections closes. +// +// The context is canceled when the client's connections closes. func (ctx *Context) Context() context.Context { if ctx.HTTPReq != nil { return ctx.HTTPReq.Context() @@ -307,7 +315,6 @@ func (ctx *Context) Context() context.Context { //---------------------------------------- // SOCKETS -// // Determine if its a unix or tcp socket. // If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port // TODO: deprecate diff --git a/state/errors.go b/state/errors.go index 6e0cdfa47..38c581f7d 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,6 +1,9 @@ package state -import "fmt" +import ( + "errors" + "fmt" +) type ( ErrInvalidBlock error @@ -99,3 +102,5 @@ func (e ErrNoConsensusParamsForHeight) Error() string { func (e ErrNoABCIResponsesForHeight) Error() string { return fmt.Sprintf("could not find results for height #%d", e.Height) } + +var ErrABCIResponsesNotPersisted = errors.New("node is not persisting abci responses") diff --git a/state/execution_test.go b/state/execution_test.go index 3d7fa93ab..ece092400 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -41,7 +41,9 @@ func TestApplyBlock(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}) @@ -67,7 +69,9 @@ func TestBeginBlockValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} @@ -130,7 +134,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) privVal := privVals[state.Validators.Validators[0].Address.String()] @@ -354,7 +360,9 @@ func TestEndBlockValidatorUpdates(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, @@ -425,7 +433,9 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), diff --git a/state/export_test.go b/state/export_test.go index 56c3d764c..f6ca0e9cd 100644 --- 
a/state/export_test.go +++ b/state/export_test.go @@ -43,6 +43,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { - stateStore := dbStore{db} + stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}} return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 19549f160..5bf20255d 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -115,7 +115,9 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida }) stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := stateStore.Save(s); err != nil { panic(err) } diff --git a/state/indexer/mocks/block_indexer.go b/state/indexer/mocks/block_indexer.go index 583e64d3a..2c0f0ecb0 100644 --- a/state/indexer/mocks/block_indexer.go +++ b/state/indexer/mocks/block_indexer.go @@ -75,13 +75,13 @@ func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, er return r0, r1 } -type NewBlockIndexerT interface { +type mockConstructorTestingTNewBlockIndexer interface { mock.TestingT Cleanup(func()) } // NewBlockIndexer creates a new instance of BlockIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockIndexer(t NewBlockIndexerT) *BlockIndexer { +func NewBlockIndexer(t mockConstructorTestingTNewBlockIndexer) *BlockIndexer { mock := &BlockIndexer{} mock.Mock.Test(t) diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index 20e6a9b16..4493a6e3f 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -193,13 +193,13 @@ func (_m *BlockStore) Size() int64 { return r0 } -type NewBlockStoreT interface { +type mockConstructorTestingTNewBlockStore interface { mock.TestingT Cleanup(func()) } // NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockStore(t NewBlockStoreT) *BlockStore { +func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { mock := &BlockStore{} mock.Mock.Test(t) diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index b4d57d04d..7279d36f7 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -70,13 +70,13 @@ func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { _m.Called(_a0, _a1) } -type NewEvidencePoolT interface { +type mockConstructorTestingTNewEvidencePool interface { mock.TestingT Cleanup(func()) } // NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEvidencePool(t NewEvidencePoolT) *EvidencePool { +func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool { mock := &EvidencePool{} mock.Mock.Test(t) diff --git a/state/mocks/store.go b/state/mocks/store.go index d02eedb6d..8cbe49080 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -153,6 +153,29 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { return r0, r1 } +// LoadLastABCIResponse provides a mock function with given fields: _a0 +func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses, error) { + ret := _m.Called(_a0) + + var r0 *tendermintstate.ABCIResponses + if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*tendermintstate.ABCIResponses) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // LoadValidators provides a mock function with given fields: _a0 func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) { ret := _m.Called(_a0) @@ -218,13 +241,13 @@ func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses return r0 } -type NewStoreT interface { +type mockConstructorTestingTNewStore interface { mock.TestingT Cleanup(func()) } // NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStore(t NewStoreT) *Store { +func NewStore(t mockConstructorTestingTNewStore) *Store { mock := &Store{} mock.Mock.Test(t) diff --git a/state/rollback_test.go b/state/rollback_test.go index 3428b0ef8..4cd203810 100644 --- a/state/rollback_test.go +++ b/state/rollback_test.go @@ -82,7 +82,10 @@ func TestRollback(t *testing.T) { } func TestRollbackNoState(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(dbm.NewMemDB(), + state.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := &mocks.BlockStore{} _, _, err := state.Rollback(blockStore, stateStore) @@ -115,7 +118,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) { } func setupStateStore(t *testing.T, height int64) state.Store { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false}) valSet, _ := types.RandValidatorSet(5, 10) params := types.DefaultConsensusParams() diff --git a/state/state.go b/state/state.go index a1df48d45..1cba8e270 100644 --- a/state/state.go +++ b/state/state.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/version" ) -// database keys +// database key var ( stateKey = []byte("stateKey") ) diff --git a/state/state_test.go b/state/state_test.go index b4e33626a..cec4dfd9a 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -29,7 +29,9 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) require.NoError(t, err) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") @@ -76,7 +78,9 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { func 
TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) assert := assert.New(t) state.LastBlockHeight++ @@ -95,7 +99,9 @@ func TestStateSaveLoad(t *testing.T) { func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) assert := assert.New(t) state.LastBlockHeight++ @@ -128,7 +134,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { defer tearDown(t) assert := assert.New(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) cases := [...]struct { // Height is implied to equal index+2, @@ -216,7 +224,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { defer tearDown(t) assert := assert.New(t) - statestore := sm.NewStore(stateDB) + statestore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Can't load anything for height 0. _, err := statestore.LoadValidators(0) @@ -249,7 +259,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} @@ -901,7 +913,9 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { const valSetSize = 2 tearDown, stateDB, state := setupTestCase(t) t.Cleanup(func() { tearDown(t) }) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) err := stateStore.Save(state) @@ -926,7 +940,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) @@ -990,7 +1006,9 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Change vals at these heights. 
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
diff --git a/state/store.go b/state/store.go
index 428460db8..0a8a0be12 100644
--- a/state/store.go
+++ b/state/store.go
@@ -37,6 +37,10 @@ func calcABCIResponsesKey(height int64) []byte {
 	return []byte(fmt.Sprintf("abciResponsesKey:%v", height))
 }
 
+func calcLastABCIResponsesKey(height int64) []byte {
+	return []byte(fmt.Sprintf("lastABCIResponsesKey:%v", height))
+}
+
 //----------------------
 
 //go:generate ../scripts/mockery_generate.sh Store
@@ -58,6 +62,8 @@ type Store interface {
 	LoadValidators(int64) (*types.ValidatorSet, error)
 	// LoadABCIResponses loads the abciResponse for a given height
 	LoadABCIResponses(int64) (*tmstate.ABCIResponses, error)
+	// LoadLastABCIResponse loads the last abciResponse for a given height
+	LoadLastABCIResponse(int64) (*tmstate.ABCIResponses, error)
 	// LoadConsensusParams loads the consensus params for a given height
 	LoadConsensusParams(int64) (tmproto.ConsensusParams, error)
 	// Save overwrites the previous state with the updated one
@@ -75,13 +81,24 @@ type Store interface {
 // dbStore wraps a db (github.com/tendermint/tm-db)
 type dbStore struct {
 	db dbm.DB
+
+	StoreOptions
+}
+
+type StoreOptions struct {
+
+	// DiscardABCIResponses determines whether or not the store
+	// retains all ABCIResponses. If DiscardABCIResponses is enabled,
+	// the store will maintain only the response object from the latest
+	// height.
+	DiscardABCIResponses bool
 }
 
 var _ Store = (*dbStore)(nil)
 
 // NewStore creates the dbStore of the state pkg.
-func NewStore(db dbm.DB) Store {
-	return dbStore{db}
+func NewStore(db dbm.DB, options StoreOptions) Store {
+	return dbStore{db, options}
 }
 
 // LoadStateFromDBOrGenesisFile loads the most recent state from the database,
@@ -358,12 +375,13 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
 }
 
 // LoadABCIResponses loads the ABCIResponses for the given height from the
-// database. If not found, ErrNoABCIResponsesForHeight is returned.
-//
-// This is useful for recovering from crashes where we called app.Commit and
-// before we called s.Save(). It can also be used to produce Merkle proofs of
-// the result of txs.
+// database. If the node has DiscardABCIResponses set to true, ErrABCIResponsesNotPersisted
+// is returned. If not found, ErrNoABCIResponsesForHeight is returned.
 func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) {
+	if store.DiscardABCIResponses {
+		return nil, ErrABCIResponsesNotPersisted
+	}
+
 	buf, err := store.db.Get(calcABCIResponsesKey(height))
 	if err != nil {
 		return nil, err
@@ -385,12 +403,43 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er
 	return abciResponses, nil
 }
 
+// LoadLastABCIResponse loads the ABCIResponses from the most recent height.
+// The height parameter is used to ensure that the response corresponds to the latest height.
+// If not, an error is returned.
+//
+// This method is used for recovering in the case that we called the Commit ABCI
+// method on the application but crashed before persisting the results.
+func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses, error) {
+	bz, err := store.db.Get(calcLastABCIResponsesKey(height))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(bz) == 0 {
+		return nil, errors.New("no last ABCI response has been persisted")
+	}
+
+	abciResponse := new(tmstate.ABCIResponsesInfo)
+	err = abciResponse.Unmarshal(bz)
+	if err != nil {
+		tmos.Exit(fmt.Sprintf(`LoadLastABCIResponse: Data has been corrupted or its spec has
+		changed: %v\n`, err))
+	}
+
+	// Here we validate the result by comparing its height to the expected height.
+	if height != abciResponse.GetHeight() {
+		return nil, fmt.Errorf("expected height %d but last stored abci responses was at height %d", height, abciResponse.GetHeight())
+	}
+
+	return abciResponse.AbciResponses, nil
+}
+
 // SaveABCIResponses persists the ABCIResponses to the database.
 // This is useful in case we crash after app.Commit and before s.Save().
 // Responses are indexed by height so they can also be loaded later to produce
 // Merkle proofs.
 //
-// Exposed for testing.
+// CONTRACT: height must be monotonically increasing every time this is called.
 func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error {
 	var dtxs []*abci.ResponseDeliverTx
 	// strip nil values,
@@ -401,17 +450,30 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
 	}
 	abciResponses.DeliverTxs = dtxs
 
-	bz, err := abciResponses.Marshal()
+	// If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults
+	// query or to reindex an event using the command line.
+	if !store.DiscardABCIResponses {
+		bz, err := abciResponses.Marshal()
+		if err != nil {
+			return err
+		}
+		if err := store.db.Set(calcABCIResponsesKey(height), bz); err != nil {
+			return err
+		}
+	}
+
+	// We always save the last ABCI response for crash recovery.
+	// This overwrites the previous saved ABCI Response.
+	response := &tmstate.ABCIResponsesInfo{
+		AbciResponses: abciResponses,
+		Height:        height,
+	}
+	bz, err := response.Marshal()
 	if err != nil {
 		return err
 	}
 
-	err = store.db.SetSync(calcABCIResponsesKey(height), bz)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return store.db.SetSync(calcLastABCIResponsesKey(height), bz)
 }
 
 //-----------------------------------------------------------------------------
@@ -471,7 +533,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
 	}
 
 	if len(buf) == 0 {
-		return nil, errors.New("value retrieved from db is empty")
+		return nil, errors.New("validators info retrieved from db is empty")
 	}
 
 	v := new(tmstate.ValidatorsInfo)
@@ -479,7 +541,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
 	if err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
 		tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed:
-                %v\n`, err))
+		%v\n`, err))
 	}
 
 	// TODO: ensure that buf is completely read.
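For readers following the new store API in the hunks above, here is a minimal usage sketch. It is not part of the patch: it assumes only the identifiers introduced in this diff (sm.NewStore, sm.StoreOptions, SaveABCIResponses, LoadABCIResponses, LoadLastABCIResponse, sm.ErrABCIResponsesNotPersisted) plus import paths inferred from the repository layout, and the runnable main wrapper is illustrative.

package main

import (
	"errors"
	"fmt"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	sm "github.com/tendermint/tendermint/state"
)

func main() {
	// A store that drops the per-height ABCI responses but still keeps the
	// latest response around for crash recovery.
	stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{DiscardABCIResponses: true})

	height := int64(1)
	responses := &tmstate.ABCIResponses{
		BeginBlock: &abci.ResponseBeginBlock{},
		DeliverTxs: []*abci.ResponseDeliverTx{{Code: 0, Data: []byte("ok")}},
		EndBlock:   &abci.ResponseEndBlock{},
	}
	if err := stateStore.SaveABCIResponses(height, responses); err != nil {
		panic(err)
	}

	// Per-height lookups are refused when responses are discarded...
	if _, err := stateStore.LoadABCIResponses(height); errors.Is(err, sm.ErrABCIResponsesNotPersisted) {
		fmt.Println("per-height ABCI responses were not persisted")
	}

	// ...but the most recent response can still be loaded for replay/recovery.
	last, err := stateStore.LoadLastABCIResponse(height)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recovered last ABCI responses with %d txs\n", len(last.DeliverTxs))
}

Note that SaveABCIResponses writes the last-response record with SetSync, so the crash-recovery copy is flushed to disk even when the per-height record is discarded.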
@@ -557,7 +619,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa
 		return nil, err
 	}
 	if len(buf) == 0 {
-		return nil, errors.New("value retrieved from db is empty")
+		return nil, errors.New("consensus params info retrieved from db is empty")
 	}
 
 	paramsInfo := new(tmstate.ConsensusParamsInfo)
diff --git a/state/store_test.go b/state/store_test.go
index e43921519..b91eeb578 100644
--- a/state/store_test.go
+++ b/state/store_test.go
@@ -23,7 +23,9 @@ import (
 
 func TestStoreLoadValidators(t *testing.T) {
 	stateDB := dbm.NewMemDB()
-	stateStore := sm.NewStore(stateDB)
+	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
 	val, _ := types.RandValidator(true, 10)
 	vals := types.NewValidatorSet([]*types.Validator{val})
@@ -54,7 +56,9 @@ func BenchmarkLoadValidators(b *testing.B) {
 	dbType := dbm.BackendType(config.DBBackend)
 	stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
 	require.NoError(b, err)
-	stateStore := sm.NewStore(stateDB)
+	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
 	state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -107,7 +111,9 @@ func TestPruneStates(t *testing.T) {
 		tc := tc
 		t.Run(name, func(t *testing.T) {
 			db := dbm.NewMemDB()
-			stateStore := sm.NewStore(db)
+			stateStore := sm.NewStore(db, sm.StoreOptions{
+				DiscardABCIResponses: false,
+			})
 			pk := ed25519.GenPrivKey().PubKey()
 
 			// Generate a bunch of state data. Validators change for heights ending with 3, and
@@ -229,3 +235,72 @@ func sliceToMap(s []int64) map[int64]bool {
 	}
 	return m
 }
+
+func TestLastABCIResponses(t *testing.T) {
+	// create an empty state store.
+	t.Run("Persisting responses", func(t *testing.T) {
+		stateDB := dbm.NewMemDB()
+		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+			DiscardABCIResponses: false,
+		})
+		responses, err := stateStore.LoadABCIResponses(1)
+		require.Error(t, err)
+		require.Nil(t, responses)
+		// stub the abciresponses.
+		response1 := &tmstate.ABCIResponses{
+			BeginBlock: &abci.ResponseBeginBlock{},
+			DeliverTxs: []*abci.ResponseDeliverTx{
+				{Code: 32, Data: []byte("Hello"), Log: "Huh?"},
+			},
+			EndBlock: &abci.ResponseEndBlock{},
+		}
+		// create new db and state store and set discard abciresponses to false.
+		stateDB = dbm.NewMemDB()
+		stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false})
+		height := int64(10)
+		// save the last abci response.
+		err = stateStore.SaveABCIResponses(height, response1)
+		require.NoError(t, err)
+		// load the last abci response and check that it was saved.
+		lastResponse, err := stateStore.LoadLastABCIResponse(height)
+		require.NoError(t, err)
+		// check that the loaded response matches the saved one.
+		assert.Equal(t, lastResponse, response1)
+		// use an incorrect height to make sure the state store errors.
+		_, err = stateStore.LoadLastABCIResponse(height + 1)
+		assert.Error(t, err)
+		// check that the response was also persisted under its height.
+		responses, err = stateStore.LoadABCIResponses(height)
+		require.NoError(t, err, responses)
+		require.Equal(t, response1, responses)
+	})
+
+	t.Run("Not persisting responses", func(t *testing.T) {
+		stateDB := dbm.NewMemDB()
+		height := int64(10)
+		// stub the second abciresponse.
+		response2 := &tmstate.ABCIResponses{
+			BeginBlock: &abci.ResponseBeginBlock{},
+			DeliverTxs: []*abci.ResponseDeliverTx{
+				{Code: 44, Data: []byte("Hello again"), Log: "????"},
+			},
+			EndBlock: &abci.ResponseEndBlock{},
+		}
+		// create a new state store that discards abci responses.
+		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+			DiscardABCIResponses: true,
+		})
+		// save an additional response.
+		err := stateStore.SaveABCIResponses(height+1, response2)
+		require.NoError(t, err)
+		// check that the response was saved by loading the last response.
+		lastResponse2, err := stateStore.LoadLastABCIResponse(height + 1)
+		require.NoError(t, err)
+		// check that the loaded response matches the saved one.
+		assert.Equal(t, response2, lastResponse2)
+		// should error as we are no longer saving the response.
+		_, err = stateStore.LoadABCIResponses(height + 1)
+		assert.Equal(t, sm.ErrABCIResponsesNotPersisted, err)
+	})
+
+}
diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go
index 7936d94c7..d5ab761ac 100644
--- a/state/tx_filter_test.go
+++ b/state/tx_filter_test.go
@@ -33,7 +33,9 @@ func TestTxFilter(t *testing.T) {
 	for i, tc := range testCases {
 		stateDB, err := dbm.NewDB("state", "memdb", os.TempDir())
 		require.NoError(t, err)
-		stateStore := sm.NewStore(stateDB)
+		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+			DiscardABCIResponses: false,
+		})
 		state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
 		require.NoError(t, err)
diff --git a/state/txindex/mocks/tx_indexer.go b/state/txindex/mocks/tx_indexer.go
index 210942e2e..93d0eb9c2 100644
--- a/state/txindex/mocks/tx_indexer.go
+++ b/state/txindex/mocks/tx_indexer.go
@@ -92,13 +92,13 @@ func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxRes
 	return r0, r1
 }
 
-type NewTxIndexerT interface {
+type mockConstructorTestingTNewTxIndexer interface {
 	mock.TestingT
 	Cleanup(func())
 }
 
 // NewTxIndexer creates a new instance of TxIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewTxIndexer(t NewTxIndexerT) *TxIndexer { +func NewTxIndexer(t mockConstructorTestingTNewTxIndexer) *TxIndexer { mock := &TxIndexer{} mock.Mock.Test(t) diff --git a/state/validation_test.go b/state/validation_test.go index afd47a650..ddca0d98c 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -28,7 +28,9 @@ func TestValidateBlockHeader(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(3, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), @@ -99,7 +101,9 @@ func TestValidateBlockCommit(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), @@ -213,7 +217,9 @@ func TestValidateBlockEvidence(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(4, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evpool := &mocks.EvidencePool{} diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go index 5029f615e..f52b9e33d 100644 --- a/statesync/mocks/state_provider.go +++ b/statesync/mocks/state_provider.go @@ -83,13 +83,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } -type NewStateProviderT interface { +type mockConstructorTestingTNewStateProvider interface { mock.TestingT Cleanup(func()) } // NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateProvider(t NewStateProviderT) *StateProvider { +func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider { mock := &StateProvider{} mock.Mock.Test(t) diff --git a/store/store.go b/store/store.go index 6f27d27d1..48fd1c97e 100644 --- a/store/store.go +++ b/store/store.go @@ -17,9 +17,9 @@ import ( BlockStore is a simple low level store for blocks. There are three types of information stored: - - BlockMeta: Meta information about each block - - Block part: Parts of each block, aggregated w/ PartSet - - Commit: The commit part of each block, for gossiping precommit votes + - BlockMeta: Meta information about each block + - Block part: Parts of each block, aggregated w/ PartSet + - Commit: The commit part of each block, for gossiping precommit votes Currently the precommit signatures are duplicated in the Block parts as well as the Commit. In the future this may change, perhaps by moving @@ -325,9 +325,10 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { // SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. // blockParts: Must be parts of the block // seenCommit: The +2/3 precommits that were seen which committed at height. -// If all the nodes restart after committing a block, -// we need this to reload the precommits to catch-up nodes to the -// most recent height. Otherwise they'd stall at H-1. 
+// +// If all the nodes restart after committing a block, +// we need this to reload the precommits to catch-up nodes to the +// most recent height. Otherwise they'd stall at H-1. func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { if block == nil { panic("BlockStore can only save a non-nil block") diff --git a/store/store_test.go b/store/store_test.go index ea07c73e6..c9ea3e925 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -60,7 +60,9 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) @@ -369,7 +371,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { func TestLoadBaseMeta(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) @@ -425,7 +429,9 @@ func TestLoadBlockPart(t *testing.T) { func TestPruneBlocks(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 1ede6fb4c..c10e74e4f 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -1,4 +1,4 @@ -//nolint: gosec +// nolint: gosec package app import ( diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index f17b4f3f4..e3154f900 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -1,4 +1,4 @@ -//nolint: gosec +// nolint: gosec package main import ( diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index 7efb4e822..52cc56111 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -1,4 +1,4 @@ -//nolint: goconst +// nolint: goconst package main import ( diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 4d46de922..7b95a4cd2 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,4 +1,4 @@ -//nolint: gosec +// nolint: gosec package e2e import ( diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go index f790f7fc1..15204c73d 100644 --- a/test/e2e/runner/exec.go +++ b/test/e2e/runner/exec.go @@ -1,4 +1,4 @@ -//nolint: gosec +// nolint: gosec package main import ( diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go index 2dbc5cf37..9e4f2e17b 100644 --- a/test/maverick/consensus/replay_file.go +++ b/test/maverick/consensus/replay_file.go @@ -298,7 +298,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo if err != nil { tmos.Exit(err.Error()) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) gdoc, err 
:= sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go index e67868214..84c2ffcb0 100644 --- a/test/maverick/consensus/state.go +++ b/test/maverick/consensus/state.go @@ -236,7 +236,9 @@ func (cs *State) handleMsg(mi msgInfo) { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): -// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool func (cs *State) enterPropose(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) @@ -1103,7 +1105,9 @@ func (cs *State) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) diff --git a/test/maverick/consensus/wal_generator.go b/test/maverick/consensus/wal_generator.go index 6997db14e..4e3cc5924 100644 --- a/test/maverick/consensus/wal_generator.go +++ b/test/maverick/consensus/wal_generator.go @@ -49,7 +49,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisState(genDoc) if err != nil { return fmt.Errorf("failed to make genesis state: %w", err) diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 919554c6d..822967158 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -97,6 +97,8 @@ type DBContext struct { // DBProvider takes a DBContext and returns an instantiated DB. type DBProvider func(*DBContext) (dbm.DB, error) +const readHeaderTimeout = 10 * time.Second + // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { @@ -174,12 +176,12 @@ type fastSyncReactor interface { // WARNING: using any name from the below list of the existing reactors will // result in replacing it with the custom one. 
// -// - MEMPOOL -// - BLOCKCHAIN -// - CONSENSUS -// - EVIDENCE -// - PEX -// - STATESYNC +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -439,8 +441,11 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, if err != nil { return nil, nil, err } + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.RPC.DiscardABCIResponses, + }) evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) if err != nil { return nil, nil, err } @@ -729,7 +734,9 @@ func NewNode(config *cfg.Config, return nil, err } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { @@ -1215,6 +1222,7 @@ func (n *Node) startPrometheusServer(addr string) *http.Server { promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, ), ), + ReadHeaderTimeout: readHeaderTimeout, } go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { @@ -1396,7 +1404,9 @@ func LoadStateFromDBOrGenesisDocProvider( // was changed, accidentally or not). Also good for audit trail. saveGenesisDoc(stateDB, genDoc) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err diff --git a/types/block_test.go b/types/block_test.go index 2355cb0f1..dc0579fec 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -200,7 +200,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var nilBytes []byte -// This follows RFC-6962, i.e. `echo -n '' | sha256sum` +// This follows RFC-6962, i.e. `echo -n ” | sha256sum` var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55} diff --git a/types/event_bus.go b/types/event_bus.go index 3cebcf680..2506efa83 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -227,7 +227,7 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd return b.Publish(EventValidatorSetUpdates, data) } -//----------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- type NopEventBus struct{} func (NopEventBus) Subscribe( diff --git a/types/utils.go b/types/utils.go index cec47e202..60e82fe3f 100644 --- a/types/utils.go +++ b/types/utils.go @@ -4,9 +4,9 @@ import "reflect" // Go lacks a simple and safe way to see if something is a typed nil. 
// See: -// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 -// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion -// - https://github.com/golang/go/issues/21538 +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 func isTypedNil(o interface{}) bool { rv := reflect.ValueOf(o) switch rv.Kind() { diff --git a/types/validator_set.go b/types/validator_set.go index 5b2ec85a5..39a004b0b 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -411,14 +411,17 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e // // Inputs: // updates - a list of proper validator changes, i.e. they have been verified by processChanges for duplicates -// and invalid values. +// +// and invalid values. +// // vals - the original validator set. Note that vals is NOT modified by this function. // removedPower - the total voting power that will be removed after the updates are verified and applied. // // Returns: // tvpAfterUpdatesBeforeRemovals - the new total voting power if these updates would be applied without the removals. -// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and -// validators are added/ updated with high power values. +// +// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and +// validators are added/ updated with high power values. // // err - non-nil if the maximum allowed total voting power would be exceeded func verifyUpdates( @@ -467,8 +470,9 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int { // 'updates' parameter must be a list of unique validators to be added or updated. // // 'updatedTotalVotingPower' is the total voting power of a set where all updates would be applied but -// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to -// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8. +// +// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to +// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8. // // No changes are made to the validator set 'vals'. func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { @@ -638,14 +642,15 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // UpdateWithChangeSet attempts to update the validator set with 'changes'. 
// It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities +// across old and newly added validators are fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// // If an error is detected during verification steps, it is returned and the validator set // is not changed. func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 67a9a96ea..6fbbb0885 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1641,7 +1641,7 @@ func TestValidatorSetProtoBuf(t *testing.T) { } } -//--------------------- +// --------------------- // Sort validators by priority and address type validatorsByPriority []*Validator @@ -1682,9 +1682,8 @@ func (tvals testValsByVotingPower) Swap(i, j int) { tvals[i], tvals[j] = tvals[j], tvals[i] } -//------------------------------------- +// ------------------------------------- // Benchmark tests -// func BenchmarkUpdates(b *testing.B) { const ( n = 100 diff --git a/types/vote_set.go b/types/vote_set.go index abdc18e61..9686a580e 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -25,38 +25,38 @@ const ( type P2PID string /* - VoteSet helps collect signatures from validators at each height+round for a - predefined vote type. +VoteSet helps collect signatures from validators at each height+round for a +predefined vote type. - We need VoteSet to be able to keep track of conflicting votes when validators - double-sign. Yet, we can't keep track of *all* the votes seen, as that could - be a DoS attack vector. +We need VoteSet to be able to keep track of conflicting votes when validators +double-sign. Yet, we can't keep track of *all* the votes seen, as that could +be a DoS attack vector. - There are two storage areas for votes. - 1. voteSet.votes - 2. voteSet.votesByBlock +There are two storage areas for votes. +1. voteSet.votes +2. voteSet.votesByBlock - `.votes` is the "canonical" list of votes. It always has at least one vote, - if a vote from a validator had been seen at all. Usually it keeps track of - the first vote seen, but when a 2/3 majority is found, votes for that get - priority and are copied over from `.votesByBlock`. +`.votes` is the "canonical" list of votes. It always has at least one vote, +if a vote from a validator had been seen at all. Usually it keeps track of +the first vote seen, but when a 2/3 majority is found, votes for that get +priority and are copied over from `.votesByBlock`. - `.votesByBlock` keeps track of a list of votes for a particular block. 
There - are two ways a &blockVotes{} gets created in `.votesByBlock`. - 1. the first vote seen by a validator was for the particular block. - 2. a peer claims to have seen 2/3 majority for the particular block. +`.votesByBlock` keeps track of a list of votes for a particular block. There +are two ways a &blockVotes{} gets created in `.votesByBlock`. +1. the first vote seen by a validator was for the particular block. +2. a peer claims to have seen 2/3 majority for the particular block. - Since the first vote from a validator will always get added in `.votesByBlock` - , all votes in `.votes` will have a corresponding entry in `.votesByBlock`. +Since the first vote from a validator will always get added in `.votesByBlock` +, all votes in `.votes` will have a corresponding entry in `.votesByBlock`. - When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its - votes are copied into `.votes`. +When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its +votes are copied into `.votes`. - All this is memory bounded because conflicting votes only get added if a peer - told us to track that block, each peer only gets to tell us 1 such block, and, - there's only a limited number of peers. +All this is memory bounded because conflicting votes only get added if a peer +told us to track that block, each peer only gets to tell us 1 such block, and, +there's only a limited number of peers. - NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. +NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. */ type VoteSet struct { chainID string @@ -133,8 +133,10 @@ func (voteSet *VoteSet) Size() int { // Returns added=true if vote is valid and new. // Otherwise returns err=ErrVote[ -// UnexpectedStep | InvalidIndex | InvalidAddress | -// InvalidSignature | InvalidBlockHash | ConflictingVotes ] +// +// UnexpectedStep | InvalidIndex | InvalidAddress | +// InvalidSignature | InvalidBlockHash | ConflictingVotes ] +// // Duplicate votes return added=false, err=nil. // Conflicting votes return added=*, err=ErrVoteConflictingVotes. // NOTE: vote should not be mutated after adding. @@ -636,10 +638,10 @@ func (voteSet *VoteSet) MakeCommit() *Commit { //-------------------------------------------------------------------------------- /* - Votes for a particular block - There are two ways a *blockVotes gets created for a blockKey. - 1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) - 2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) +Votes for a particular block +There are two ways a *blockVotes gets created for a blockKey. +1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) +2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) */ type blockVotes struct { peerMaj23 bool // peer claims to have maj23