Compare commits

...

6 Commits

Author SHA1 Message Date
Sam Ricotta
afaed78146 removing unnecessary formatting 2022-08-19 14:26:19 +02:00
Thane Thomson
596ca4e591 config: Move discard_abci_responses flag into its own storage section (#9275)
* config: Move discard_abci_responses flag into its own storage section

Signed-off-by: Thane Thomson <connect@thanethomson.com>

* Update config comment to highlight space saving tradeoff

Signed-off-by: Thane Thomson <connect@thanethomson.com>

Signed-off-by: Thane Thomson <connect@thanethomson.com>
2022-08-19 11:07:43 +02:00
samricotta
28b7cd6d6e Update to ABCILastResponseskey (#9253)
* update last responses key
2022-08-19 11:07:43 +02:00
samricotta
6321e4023d update default (#9235) 2022-08-19 11:07:43 +02:00
samricotta
22c68d7b41 Small update to toml.go for abci-responses (#9232)
* update to toml
2022-08-19 11:07:43 +02:00
samricotta
2ca8a8cc43 Backport of sam/abci-responses (#9090) (#9159)
*backport of sam/abci-responses

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2022-08-19 11:07:43 +02:00
57 changed files with 752 additions and 229 deletions

View File

@@ -31,6 +31,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
### IMPROVEMENTS
- [config] \#9054 Configuration flag added to permit Tendermint to discard all ABCIResponses except for the most recent
### BUG FIXES
- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
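The flag referenced by the #9054 improvement above is exposed to operators through config.toml; a minimal sketch of enabling it (key name and placement follow the storage section added later in this changeset; the default is false):

# config.toml, storage section
discard_abci_responses = true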

View File

@@ -70,7 +70,9 @@ func newReactor(
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(blockDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
@@ -83,7 +85,9 @@ func newReactor(
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {

View File

@@ -40,6 +40,9 @@ replace the backend. The default start-height is 0, meaning the tooling will sta
reindex from the base block height (inclusive); and the default end-height is 0, meaning
the tooling will reindex until the latest block height (inclusive). Users can omit
either or both arguments.
Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
want to use this command.
`,
Example: `
tendermint reindex-event

View File

@@ -77,7 +77,9 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
stateStore := state.NewStore(stateDB, state.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
return blockStore, stateStore, nil
}

View File

@@ -77,6 +77,7 @@ type Config struct {
// https://github.com/tendermint/tendermint/issues/9279
DeprecatedFastSyncConfig map[interface{}]interface{} `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
Storage *StorageConfig `mapstructure:"storage"`
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
}
@@ -91,6 +92,7 @@ func DefaultConfig() *Config {
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
Consensus: DefaultConsensusConfig(),
Storage: DefaultStorageConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
}
@@ -106,6 +108,7 @@ func TestConfig() *Config {
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
Consensus: TestConsensusConfig(),
Storage: TestStorageConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
}
@@ -1087,11 +1090,41 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
}
//-----------------------------------------------------------------------------
// StorageConfig
// StorageConfig allows more fine-grained control over certain storage-related
// behavior.
type StorageConfig struct {
// Set to false to ensure ABCI responses are persisted. ABCI responses are
// required for `/block_results` RPC queries, and to reindex events in the
// command-line tool.
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
}
// DefaultStorageConfig returns the default configuration options relating to
// Tendermint storage optimization.
func DefaultStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}
// TestStorageConfig returns storage configuration that can be used for
// testing.
func TestStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}
// -----------------------------------------------------------------------------
// TxIndexConfig
// Remember that Event has the following structure:
// type: [
// key: value,
// ...
//
// key: value,
// ...
//
// ]
//
// CompositeKeys are constructed by `type.key`
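A minimal sketch of how the new Storage config flows into the state store constructor, mirroring the wiring in node.go further down in this changeset (import paths assume the v0.34 release line; the in-memory DB is only for illustration):

package main

import (
	dbm "github.com/tendermint/tm-db"

	cfg "github.com/tendermint/tendermint/config"
	sm "github.com/tendermint/tendermint/state"
)

func main() {
	conf := cfg.DefaultConfig() // Storage.DiscardABCIResponses defaults to false

	// Honour the operator's storage setting when constructing the state store.
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: conf.Storage.DiscardABCIResponses,
	})
	_ = stateStore
}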

View File

@@ -482,6 +482,16 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
#######################################################
### Storage Configuration Options ###
#######################################################
# Set to true to discard ABCI responses from the state store, which can save a
# considerable amount of disk space. Set to false to ensure ABCI responses are
# persisted. ABCI responses are required for /block_results RPC queries, and to
# reindex events in the command-line tool.
discard_abci_responses = {{ .Storage.DiscardABCIResponses }}
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################

View File

@@ -50,7 +50,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i := 0; i < nValidators; i++ {
logger := consensusLogger().With("test", "byzantine", "validator", i)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)

View File

@@ -426,7 +426,9 @@ func newStateWithConfigAndBlockStore(
// Make State
stateDB := blockDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
panic(err)
}
@@ -716,7 +718,9 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -754,7 +758,9 @@ func randConsensusNetWithPeers(
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)

View File

@@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
err := stateStore.Save(state)
require.NoError(t, err)

View File

@@ -138,7 +138,9 @@ func TestReactorWithEvidence(t *testing.T) {
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)

View File

@@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks(
case appBlockHeight == storeBlockHeight:
// We ran Commit, but didn't save the state, so replayBlock with mock app.
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight)
if err != nil {
return nil, err
}

View File

@@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
if err != nil {
tmos.Exit(err.Error())
}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
if err != nil {
tmos.Exit(err.Error())

View File

@@ -158,7 +158,9 @@ LOOP:
logger := log.NewNopLogger()
blockDB := dbm.NewMemDB()
stateDB := blockDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -692,7 +694,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
store.chain = chain
store.commits = commits
@@ -711,7 +715,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1)
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
DiscardABCIResponses: false,
})
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
@@ -890,7 +896,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, appVersion)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
@@ -1147,7 +1155,9 @@ func stateAndStore(
pubKey crypto.PubKey,
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
store := newMockBlockStore(config, state.ConsensusParams)
@@ -1224,7 +1234,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, 0x0)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
oldValAddr := state.Validators.Validators[0].Address

View File

@@ -978,7 +978,7 @@ func (cs *State) handleTxsAvailable() {
// Used internally by handleTimeout and handleMsg to make state transitions
// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
@@ -1060,7 +1060,7 @@ func (cs *State) needProofBlock(height int64) bool {
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *State) enterPropose(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)

View File

@@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)

View File

@@ -24,7 +24,6 @@ https://bitcointalk.org/?topic=102395
* * * h6
/ \ / \ / \
h0 h1 h2 h3 h4 h5
TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.
*/

View File

@@ -97,11 +97,11 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) {
// Update takes both the new state and the evidence committed at that height and performs
// the following operations:
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
// DuplicateVoteEvidence and add it to the pool.
// 2. Update the pool's state which contains evidence params relating to expiry.
// 3. Moves pending evidence that has now been committed into the committed pool.
// 4. Removes any expired evidence based on both height and time.
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
// DuplicateVoteEvidence and add it to the pool.
// 2. Update the pool's state which contains evidence params relating to expiry.
// 3. Moves pending evidence that has now been committed into the committed pool.
// 4. Removes any expired evidence based on both height and time.
func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) {
// sanity check
if state.LastBlockHeight <= evpool.state.LastBlockHeight {

View File

@@ -348,7 +348,9 @@ func TestRecoverPendingEvidence(t *testing.T) {
func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state := sm.State{
ChainID: evidenceChainID,
InitialHeight: 1,

View File

@@ -108,7 +108,7 @@ func (evpool *Pool) verify(evidence types.Evidence) error {
// - the nodes trusted header at the same height as the conflicting header has a different hash
//
// CONTRACT: must run ValidateBasic() on the evidence before verifying
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,
commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {
// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single

1
go.sum
View File

@@ -1031,6 +1031,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=

View File

@@ -17,7 +17,7 @@ const (
// all other modules).
//
// Example:
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) {
if lvl == "" {
return nil, errors.New("empty log level")

View File

@@ -13,12 +13,12 @@
// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit
// precision):
//
// int32(32) // Output: 32
// uint32(32) // Output: 32
// int64(64) // Output: "64"
// uint64(64) // Output: "64"
// int(64) // Output: "64"
// uint(64) // Output: "64"
// int32(32) // Output: 32
// uint32(32) // Output: 32
// int64(64) // Output: "64"
// uint64(64) // Output: "64"
// int(64) // Output: "64"
// uint(64) // Output: "64"
//
// Encoding of other scalars follows encoding/json:
//
@@ -50,7 +50,7 @@
// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero
// times emitted as "0001-01-01T00:00:00Z" as with encoding/json):
//
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
// // Output: "2020-06-08T14:21:28.000000123Z"
// time.Time{} // Output: "0001-01-01T00:00:00Z"
// (*time.Time)(nil) // Output: null

View File

@@ -31,7 +31,6 @@
// return subscription.Err()
// }
// }
//
package pubsub
import (

View File

@@ -67,7 +67,6 @@ Example usage:
if err != nil {
// handle error
}
c, err := NewHTTPClient(
chainID,
TrustOptions{
@@ -82,7 +81,6 @@ Example usage:
if err != nil {
// handle error
}
h, err := c.TrustedHeader(100)
if err != nil {
// handle error

View File

@@ -430,7 +430,9 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
return nil, nil, err
}
evidenceLogger := logger.With("module", "evidence")
evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
}), blockStore)
if err != nil {
return nil, nil, err
}
@@ -714,7 +716,9 @@ func NewNode(config *cfg.Config,
return nil, err
}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
if err != nil {
@@ -1390,7 +1394,9 @@ func LoadStateFromDBOrGenesisDocProvider(
return sm.State{}, nil, err
}
}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
return sm.State{}, nil, err

View File

@@ -235,7 +235,9 @@ func TestCreateProposalBlock(t *testing.T) {
var height int64 = 1
state, stateDB, privVals := state(1, height)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
maxBytes := 16384
var partSize uint32 = 256
maxEvidenceBytes := int64(maxBytes / 2)
@@ -340,7 +342,9 @@ func TestMaxProposalBlockSize(t *testing.T) {
var height int64 = 1
state, stateDB, _ := state(1, height)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
var maxBytes int64 = 16384
var partSize uint32 = 256
state.ConsensusParams.Block.MaxBytes = maxBytes
@@ -464,7 +468,9 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
// save validators to db for 2 heights
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
if err := stateStore.Save(s); err != nil {
panic(err)
}

View File

@@ -10,9 +10,9 @@ import (
// Only Go1.10 has a proper net.Conn implementation that
// has the SetDeadline method implemented as per
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
// lest we run into problems like
// https://github.com/tendermint/tendermint/issues/851
// https://github.com/tendermint/tendermint/issues/851
// so for go versions < Go1.10 use our custom net.Conn creator
// that doesn't return an `Unimplemented error` for net.Conn.
// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04

View File

@@ -104,7 +104,6 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
All addresses that meet these criteria are assumed to be worthless and not
worth keeping hold of.
*/
func (ka *knownAddress) isBad() bool {
// Is Old --> good

View File

@@ -58,9 +58,9 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
}
// --- FAIL: TestPEXReactorRunning (11.10s)
// pex_reactor_test.go:411: expected all switches to be connected to at
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
// pex_reactor_test.go:411: expected all switches to be connected to at
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
//
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
// if any peer (who we already connected to) has the same IP. Even though local

View File

@@ -75,10 +75,10 @@ message RequestQuery {
}
message RequestBeginBlock {
bytes hash = 1;
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
bytes hash = 1;
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
}
enum CheckTxType {
@@ -234,7 +234,7 @@ message ResponseDeliverTx {
}
message ResponseEndBlock {
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
ConsensusParams consensus_param_updates = 2;
repeated Event events = 3
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];

View File

@@ -104,7 +104,7 @@ func (m *NewRoundStep) GetLastCommitRound() int32 {
}
// NewValidBlock is sent when a validator observes a valid block B in some round r,
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// In case the block is also committed, then IsCommit flag is set to true.
type NewValidBlock struct {
Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`

View File

@@ -18,7 +18,7 @@ message NewRoundStep {
}
// NewValidBlock is sent when a validator observes a valid block B in some round r,
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// In case the block is also committed, then IsCommit flag is set to true.
message NewValidBlock {
int64 height = 1;

View File

@@ -199,6 +199,58 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 {
return 0
}
type ABCIResponsesInfo struct {
AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"`
Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
}
func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} }
func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) }
func (*ABCIResponsesInfo) ProtoMessage() {}
func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_ccfacf933f22bf93, []int{3}
}
func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_ABCIResponsesInfo.Merge(m, src)
}
func (m *ABCIResponsesInfo) XXX_Size() int {
return m.Size()
}
func (m *ABCIResponsesInfo) XXX_DiscardUnknown() {
xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo
func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses {
if m != nil {
return m.AbciResponses
}
return nil
}
func (m *ABCIResponsesInfo) GetHeight() int64 {
if m != nil {
return m.Height
}
return 0
}
type Version struct {
Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"`
Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"`
@@ -208,7 +260,7 @@ func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) {
return fileDescriptor_ccfacf933f22bf93, []int{3}
return fileDescriptor_ccfacf933f22bf93, []int{4}
}
func (m *Version) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -284,7 +336,7 @@ func (m *State) Reset() { *m = State{} }
func (m *State) String() string { return proto.CompactTextString(m) }
func (*State) ProtoMessage() {}
func (*State) Descriptor() ([]byte, []int) {
return fileDescriptor_ccfacf933f22bf93, []int{4}
return fileDescriptor_ccfacf933f22bf93, []int{5}
}
func (m *State) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -415,6 +467,7 @@ func init() {
proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses")
proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo")
proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo")
proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo")
proto.RegisterType((*Version)(nil), "tendermint.state.Version")
proto.RegisterType((*State)(nil), "tendermint.state.State")
}
@@ -422,55 +475,58 @@ func init() {
func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) }
var fileDescriptor_ccfacf933f22bf93 = []byte{
// 763 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30,
0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34,
0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03,
0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff,
0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10,
0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c,
0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08,
0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d,
0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84,
0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9,
0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70,
0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43,
0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0,
0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5,
0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80,
0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd,
0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb,
0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba,
0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89,
0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6,
0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e,
0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f,
0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c,
0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69,
0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41,
0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8,
0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7,
0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6,
0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71,
0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07,
0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89,
0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc,
0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56,
0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9,
0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9,
0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35,
0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57,
0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83,
0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd,
0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f,
0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72,
0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8,
0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07,
0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab,
0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e,
0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4,
0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00,
0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00,
// 805 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44,
0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1,
0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c,
0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0,
0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d,
0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46,
0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93,
0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c,
0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8,
0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88,
0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41,
0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57,
0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84,
0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62,
0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82,
0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0,
0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42,
0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac,
0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29,
0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65,
0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2,
0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85,
0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd,
0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e,
0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9,
0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6,
0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 0x21, 0x75, 0xa8, 0xfc,
0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85,
0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd,
0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8,
0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7,
0xd0, 0x5a, 0x44, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2,
0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b,
0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9,
0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0,
0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30,
0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f,
0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b,
0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12,
0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8,
0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3,
0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2,
0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1,
0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42,
0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93,
0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a,
0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf,
0x5b, 0xdb, 0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57,
0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba,
0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9,
0x2e, 0xa2, 0x07, 0x00, 0x00,
}
func (m *ABCIResponses) Marshal() (dAtA []byte, err error) {
@@ -612,6 +668,46 @@ func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Height != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
i--
dAtA[i] = 0x10
}
if m.AbciResponses != nil {
{
size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Version) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -747,12 +843,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x32
}
n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
if err10 != nil {
return 0, err10
n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
if err11 != nil {
return 0, err11
}
i -= n10
i = encodeVarintTypes(dAtA, i, uint64(n10))
i -= n11
i = encodeVarintTypes(dAtA, i, uint64(n11))
i--
dAtA[i] = 0x2a
{
@@ -854,6 +950,22 @@ func (m *ConsensusParamsInfo) Size() (n int) {
return n
}
func (m *ABCIResponsesInfo) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.AbciResponses != nil {
l = m.AbciResponses.Size()
n += 1 + l + sovTypes(uint64(l))
}
if m.Height != 0 {
n += 1 + sovTypes(uint64(m.Height))
}
return n
}
func (m *Version) Size() (n int) {
if m == nil {
return 0
@@ -1291,6 +1403,111 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.AbciResponses == nil {
m.AbciResponses = &ABCIResponses{}
}
if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
}
m.Height = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Height |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Version) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0

View File

@@ -32,6 +32,11 @@ message ConsensusParamsInfo {
int64 last_height_changed = 2;
}
message ABCIResponsesInfo {
ABCIResponses abci_responses = 1;
int64 height = 2;
}
message Version {
tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false];
string software = 2;

View File

@@ -17,20 +17,20 @@ message Evidence {
// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes.
message DuplicateVoteEvidence {
tendermint.types.Vote vote_a = 1;
tendermint.types.Vote vote_b = 2;
int64 total_voting_power = 3;
int64 validator_power = 4;
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
tendermint.types.Vote vote_a = 1;
tendermint.types.Vote vote_b = 2;
int64 total_voting_power = 3;
int64 validator_power = 4;
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
}
// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
message LightClientAttackEvidence {
tendermint.types.LightBlock conflicting_block = 1;
int64 common_height = 2;
tendermint.types.LightBlock conflicting_block = 1;
int64 common_height = 2;
repeated tendermint.types.Validator byzantine_validators = 3;
int64 total_voting_power = 4;
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
int64 total_voting_power = 4;
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
}
message EvidenceList {

View File

@@ -106,10 +106,10 @@ message Vote {
// Commit contains the evidence that a block was committed by a set of validators.
message Commit {
int64 height = 1;
int32 round = 2;
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
int64 height = 1;
int32 round = 2;
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
}
// CommitSig is a part of the Vote included in a Commit.

View File

@@ -81,7 +81,9 @@ func TestBlockResults(t *testing.T) {
}
env = &Environment{}
env.StateStore = sm.NewStore(dbm.NewMemDB())
env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardABCIResponses: false,
})
err := env.StateStore.SaveABCIResponses(100, results)
require.NoError(t, err)
env.BlockStore = mockBlockStore{height: 100}

View File

@@ -9,12 +9,12 @@
//
// As a GET request, it would have URI encoded parameters, and look like:
//
// curl 'http://localhost:8008/hello_world?name="my_world"&num=5'
// curl 'http://localhost:8008/hello_world?name="my_world"&num=5'
//
// Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`.
// This should also work:
//
// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5
// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5
//
// A GET request to `/` returns a list of available endpoints.
// For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be.
@@ -35,8 +35,7 @@
//
// With the above saved in file `data.json`, we can make the request with
//
// curl --data @data.json http://localhost:8008
//
// curl --data @data.json http://localhost:8008
//
// WebSocket (JSONRPC)
//
@@ -48,32 +47,32 @@
//
// Define some types and routes:
//
// type ResultStatus struct {
// Value string
// }
// type ResultStatus struct {
// Value string
// }
//
// Define some routes
//
// var Routes = map[string]*rpcserver.RPCFunc{
// "status": rpcserver.NewRPCFunc(Status, "arg"),
// }
// var Routes = map[string]*rpcserver.RPCFunc{
// "status": rpcserver.NewRPCFunc(Status, "arg"),
// }
//
// An rpc function:
//
// func Status(v string) (*ResultStatus, error) {
// return &ResultStatus{v}, nil
// }
// func Status(v string) (*ResultStatus, error) {
// return &ResultStatus{v}, nil
// }
//
// Now start the server:
//
// mux := http.NewServeMux()
// rpcserver.RegisterRPCFuncs(mux, Routes)
// wm := rpcserver.NewWebsocketManager(Routes)
// mux.HandleFunc("/websocket", wm.WebsocketHandler)
// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{})
// if err != nil { panic(err) }
// go rpcserver.Serve(listener, mux, logger)
// mux := http.NewServeMux()
// rpcserver.RegisterRPCFuncs(mux, Routes)
// wm := rpcserver.NewWebsocketManager(Routes)
// mux.HandleFunc("/websocket", wm.WebsocketHandler)
// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{})
// if err != nil { panic(err) }
// go rpcserver.Serve(listener, mux, logger)
//
// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`)
// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`.

View File

@@ -176,8 +176,8 @@ func arrayParamsToArgs(
// array.
//
// Example:
// rpcFunc.args = [rpctypes.Context string]
// rpcFunc.argNames = ["arg"]
// rpcFunc.args = [rpctypes.Context string]
// rpcFunc.argNames = ["arg"]
func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) {
const argsOffset = 1

View File

@@ -215,15 +215,17 @@ func (resp RPCResponse) String() string {
}
// From the JSON-RPC 2.0 spec:
//
// If there was an error in detecting the id in the Request object (e.g. Parse
// error/Invalid Request), it MUST be Null.
// error/Invalid Request), it MUST be Null.
func RPCParseError(err error) RPCResponse {
return NewRPCErrorResponse(nil, -32700, "Parse error. Invalid JSON", err.Error())
}
// From the JSON-RPC 2.0 spec:
//
// If there was an error in detecting the id in the Request object (e.g. Parse
// error/Invalid Request), it MUST be Null.
// error/Invalid Request), it MUST be Null.
func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse {
return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error())
}
@@ -276,9 +278,9 @@ type Context struct {
// RemoteAddr returns the remote address (usually a string "IP:port").
// If neither HTTPReq nor WSConn is set, an empty string is returned.
// HTTP:
// http.Request#RemoteAddr
// http.Request#RemoteAddr
// WS:
// result of GetRemoteAddr
// result of GetRemoteAddr
func (ctx *Context) RemoteAddr() string {
if ctx.HTTPReq != nil {
return ctx.HTTPReq.RemoteAddr
@@ -291,10 +293,10 @@ func (ctx *Context) RemoteAddr() string {
// Context returns the request's context.
// The returned context is always non-nil; it defaults to the background context.
// HTTP:
// The context is canceled when the client's connection closes, the request
// is canceled (with HTTP/2), or when the ServeHTTP method returns.
// The context is canceled when the client's connection closes, the request
// is canceled (with HTTP/2), or when the ServeHTTP method returns.
// WS:
// The context is canceled when the client's connections closes.
// The context is canceled when the client's connections closes.
func (ctx *Context) Context() context.Context {
if ctx.HTTPReq != nil {
return ctx.HTTPReq.Context()
@@ -306,7 +308,6 @@ func (ctx *Context) Context() context.Context {
//----------------------------------------
// SOCKETS
//
// Determine if its a unix or tcp socket.
// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port

View File

@@ -1,6 +1,9 @@
package state
import "fmt"
import (
"errors"
"fmt"
)
type (
ErrInvalidBlock error
@@ -99,3 +102,5 @@ func (e ErrNoConsensusParamsForHeight) Error() string {
func (e ErrNoABCIResponsesForHeight) Error() string {
return fmt.Sprintf("could not find results for height #%d", e.Height)
}
var ErrABCIResponsesNotPersisted = errors.New("node is not persisting abci responses")
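Callers that previously assumed ABCI responses were always available can distinguish the new condition with the sentinel above; a hedged sketch (loadResults is a hypothetical helper, not part of this changeset):

package example

import (
	"errors"
	"fmt"

	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	sm "github.com/tendermint/tendermint/state"
)

// loadResults wraps LoadABCIResponses and surfaces a clearer message when the
// node is configured with discard_abci_responses = true.
func loadResults(store sm.Store, height int64) (*tmstate.ABCIResponses, error) {
	resps, err := store.LoadABCIResponses(height)
	if errors.Is(err, sm.ErrABCIResponsesNotPersisted) {
		// Only the latest response is retained on discarding nodes.
		return nil, fmt.Errorf("results for height %d unavailable: %w", height, err)
	}
	return resps, err
}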

View File

@@ -41,7 +41,9 @@ func TestApplyBlock(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mmock.Mempool{}, sm.EmptyEvidencePool{})
@@ -67,7 +69,9 @@ func TestBeginBlockValidators(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // no need to check error again
state, stateDB, _ := makeState(2, 2)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
@@ -130,7 +134,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
privVal := privVals[state.Validators.Validators[0].Address.String()]
@@ -354,7 +360,9 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(
stateStore,
@@ -425,7 +433,9 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),

View File

@@ -43,6 +43,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto
// SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in
// store.go, exported exclusively and explicitly for testing.
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error {
stateStore := dbStore{db}
stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}}
return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet)
}

View File

@@ -115,7 +115,9 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida
})
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
if err := stateStore.Save(s); err != nil {
panic(err)
}

View File

@@ -153,6 +153,29 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) {
return r0, r1
}
// LoadLastABCIResponse provides a mock function with given fields: _a0
func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses, error) {
ret := _m.Called(_a0)
var r0 *tendermintstate.ABCIResponses
if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*tendermintstate.ABCIResponses)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadValidators provides a mock function with given fields: _a0
func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) {
ret := _m.Called(_a0)

View File

@@ -82,7 +82,10 @@ func TestRollback(t *testing.T) {
}
func TestRollbackNoState(t *testing.T) {
stateStore := state.NewStore(dbm.NewMemDB())
stateStore := state.NewStore(dbm.NewMemDB(),
state.StoreOptions{
DiscardABCIResponses: false,
})
blockStore := &mocks.BlockStore{}
_, _, err := state.Rollback(blockStore, stateStore)
@@ -115,7 +118,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) {
}
func setupStateStore(t *testing.T, height int64) state.Store {
stateStore := state.NewStore(dbm.NewMemDB())
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false})
valSet, _ := types.RandValidatorSet(5, 10)
params := types.DefaultConsensusParams()

View File

@@ -29,7 +29,9 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
config := cfg.ResetTestRoot("state_")
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
@@ -76,7 +78,9 @@ func TestMakeGenesisStateNilValidators(t *testing.T) {
func TestStateSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
assert := assert.New(t)
state.LastBlockHeight++
@@ -95,7 +99,9 @@ func TestStateSaveLoad(t *testing.T) {
func TestABCIResponsesSaveLoad1(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
assert := assert.New(t)
state.LastBlockHeight++
@@ -128,7 +134,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
defer tearDown(t)
assert := assert.New(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
cases := [...]struct {
// Height is implied to equal index+2,
@@ -216,7 +224,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
defer tearDown(t)
assert := assert.New(t)
statestore := sm.NewStore(stateDB)
statestore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
// Can't load anything for height 0.
_, err := statestore.LoadValidators(0)
@@ -249,7 +259,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
func TestOneValidatorChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
// Change vals at these heights.
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
@@ -901,7 +913,9 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
const valSetSize = 2
tearDown, stateDB, state := setupTestCase(t)
t.Cleanup(func() { tearDown(t) })
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
err := stateStore.Save(state)
@@ -926,7 +940,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
const valSetSize = 7
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
@@ -990,7 +1006,9 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
// Change vals at these heights.
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}

View File

@@ -39,6 +39,8 @@ func calcABCIResponsesKey(height int64) []byte {
//----------------------
var lastABCIResponseKey = []byte("lastABCIResponseKey")
//go:generate ../scripts/mockery_generate.sh Store
// Store defines the state store interface
@@ -58,6 +60,8 @@ type Store interface {
LoadValidators(int64) (*types.ValidatorSet, error)
// LoadABCIResponses loads the abciResponse for a given height
LoadABCIResponses(int64) (*tmstate.ABCIResponses, error)
// LoadLastABCIResponse loads the last abciResponse for a given height
LoadLastABCIResponse(int64) (*tmstate.ABCIResponses, error)
// LoadConsensusParams loads the consensus params for a given height
LoadConsensusParams(int64) (tmproto.ConsensusParams, error)
// Save overwrites the previous state with the updated one
@@ -75,13 +79,24 @@ type Store interface {
// dbStore wraps a db (github.com/tendermint/tm-db)
type dbStore struct {
db dbm.DB
StoreOptions
}
type StoreOptions struct {
// DiscardABCIResponses determines whether or not the store
// retains all ABCIResponses. If DiscardABCIResponses is enabled,
// the store will maintain only the response object from the latest
// height.
DiscardABCIResponses bool
}
var _ Store = (*dbStore)(nil)
// NewStore creates the dbStore of the state pkg.
func NewStore(db dbm.DB) Store {
return dbStore{db}
func NewStore(db dbm.DB, options StoreOptions) Store {
return dbStore{db, options}
}
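For illustration only (not part of this diff), a minimal sketch of constructing the store with the new options; the in-memory DB and the sm package alias are assumptions:
db := dbm.NewMemDB()
stateStore := sm.NewStore(db, sm.StoreOptions{
	// Keep only the most recent ABCI responses; use false to retain the full history.
	DiscardABCIResponses: true,
})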
// LoadStateFromDBOrGenesisFile loads the most recent state from the database,
@@ -358,12 +373,13 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
}
// LoadABCIResponses loads the ABCIResponses for the given height from the
// database. If not found, ErrNoABCIResponsesForHeight is returned.
//
// This is useful for recovering from crashes where we called app.Commit and
// before we called s.Save(). It can also be used to produce Merkle proofs of
// the result of txs.
// database. If the node has DiscardABCIResponses set to true, ErrABCIResponsesNotPersisted
// is returned. If not found, ErrNoABCIResponsesForHeight is returned.
func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) {
if store.DiscardABCIResponses {
return nil, ErrABCIResponsesNotPersisted
}
buf, err := store.db.Get(calcABCIResponsesKey(height))
if err != nil {
return nil, err
@@ -385,12 +401,43 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er
return abciResponses, nil
}
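For illustration only, a hedged sketch of how a caller (for example an RPC handler) might branch on the new error; the surrounding handler, the height variable, and the error wrapping are assumptions:
resps, err := stateStore.LoadABCIResponses(height)
switch {
case errors.Is(err, sm.ErrABCIResponsesNotPersisted):
	// The node discards historical responses, so only the latest are kept.
	return nil, fmt.Errorf("results for height %d were discarded: %w", height, err)
case err != nil:
	return nil, err // includes ErrNoABCIResponsesForHeight
}
_ = resps // use the responses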
// LoadLastABCIResponse loads the ABCIResponses from the most recent height.
// The height parameter is used to ensure that the response corresponds to the latest height.
// If not, an error is returned.
//
// This method is used for recovering in the case that we called the Commit ABCI
// method on the application but crashed before persisting the results.
func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses, error) {
bz, err := store.db.Get(lastABCIResponseKey)
if err != nil {
return nil, err
}
if len(bz) == 0 {
return nil, errors.New("no last ABCI response has been persisted")
}
abciResponse := new(tmstate.ABCIResponsesInfo)
err = abciResponse.Unmarshal(bz)
if err != nil {
tmos.Exit(fmt.Sprintf(`LoadLastABCIResponse: Data has been corrupted or its spec has
changed: %v\n`, err))
}
// Here we validate the result by comparing its height to the expected height.
if height != abciResponse.GetHeight() {
return nil, errors.New("expected height %d but last stored abci responses was at height %d")
}
return abciResponse.AbciResponses, nil
}
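To make the intended crash-recovery call site concrete, a sketch under assumed replay wiring; only LoadLastABCIResponse and ABCIResponsesResultsHash come from this package:
// After a restart, recover the responses of the Commit that may not have
// been fully persisted, keyed off the state's last block height.
abciResponses, err := stateStore.LoadLastABCIResponse(state.LastBlockHeight)
if err != nil {
	return fmt.Errorf("recovering last ABCI responses: %w", err)
}
// For example, recompute the results hash for the replayed block.
resultsHash := sm.ABCIResponsesResultsHash(abciResponses)
_ = resultsHash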
// SaveABCIResponses persists the ABCIResponses to the database.
// This is useful in case we crash after app.Commit and before s.Save().
// Responses are indexed by height so they can also be loaded later to produce
// Merkle proofs.
//
// Exposed for testing.
// CONTRACT: height must be monotonically increasing every time this is called.
func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error {
var dtxs []*abci.ResponseDeliverTx
// strip nil values,
@@ -401,17 +448,30 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
}
abciResponses.DeliverTxs = dtxs
bz, err := abciResponses.Marshal()
// If DiscardABCIResponses is false, we save the full ABCIResponses under the per-height key.
// This is what the /block_results RPC query and the reindex-event command read.
if !store.DiscardABCIResponses {
bz, err := abciResponses.Marshal()
if err != nil {
return err
}
if err := store.db.Set(calcABCIResponsesKey(height), bz); err != nil {
return err
}
}
// We always save the last ABCI response for crash recovery.
// This overwrites the previously saved ABCI response.
response := &tmstate.ABCIResponsesInfo{
AbciResponses: abciResponses,
Height: height,
}
bz, err := response.Marshal()
if err != nil {
return err
}
err = store.db.SetSync(calcABCIResponsesKey(height), bz)
if err != nil {
return err
}
return nil
return store.db.SetSync(lastABCIResponseKey, bz)
}
//-----------------------------------------------------------------------------
@@ -471,7 +531,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
}
if len(buf) == 0 {
return nil, errors.New("value retrieved from db is empty")
return nil, errors.New("no last ABCI response has been persisted")
}
v := new(tmstate.ValidatorsInfo)
@@ -479,7 +539,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed:
%v\n`, err))
%v\n`, err))
}
// TODO: ensure that buf is completely read.
@@ -557,7 +617,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa
return nil, err
}
if len(buf) == 0 {
return nil, errors.New("value retrieved from db is empty")
return nil, errors.New("no last ABCI response has been persisted")
}
paramsInfo := new(tmstate.ConsensusParamsInfo)

View File

@@ -23,7 +23,9 @@ import (
func TestStoreLoadValidators(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
@@ -54,7 +56,9 @@ func BenchmarkLoadValidators(b *testing.B) {
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
require.NoError(b, err)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
b.Fatal(err)
@@ -107,7 +111,9 @@ func TestPruneStates(t *testing.T) {
tc := tc
t.Run(name, func(t *testing.T) {
db := dbm.NewMemDB()
stateStore := sm.NewStore(db)
stateStore := sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
pk := ed25519.GenPrivKey().PubKey()
// Generate a bunch of state data. Validators change for heights ending with 3, and
@@ -229,3 +235,72 @@ func sliceToMap(s []int64) map[int64]bool {
}
return m
}
func TestLastABCIResponses(t *testing.T) {
// create an empty state store.
t.Run("Not persisting responses", func(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
responses, err := stateStore.LoadABCIResponses(1)
require.Error(t, err)
require.Nil(t, responses)
// stub the abciresponses.
response1 := &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{},
DeliverTxs: []*abci.ResponseDeliverTx{
{Code: 32, Data: []byte("Hello"), Log: "Huh?"},
},
EndBlock: &abci.ResponseEndBlock{},
}
// create a new db and state store with DiscardABCIResponses set to false.
stateDB = dbm.NewMemDB()
stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false})
height := int64(10)
// save the last abci response.
err = stateStore.SaveABCIResponses(height, response1)
require.NoError(t, err)
// load the last ABCI response and check that it was saved.
lastResponse, err := stateStore.LoadLastABCIResponse(height)
require.NoError(t, err)
// check that the loaded response matches the one that was saved.
assert.Equal(t, lastResponse, response1)
// use an incorrect height to make sure the state store errors.
_, err = stateStore.LoadLastABCIResponse(height + 1)
assert.Error(t, err)
// check that the response was also saved under the per-height ABCI responses key.
responses, err = stateStore.LoadABCIResponses(height)
require.NoError(t, err, responses)
require.Equal(t, response1, responses)
})
t.Run("persisting responses", func(t *testing.T) {
stateDB := dbm.NewMemDB()
height := int64(10)
// stub the second abciresponse.
response2 := &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{},
DeliverTxs: []*abci.ResponseDeliverTx{
{Code: 44, Data: []byte("Hello again"), Log: "????"},
},
EndBlock: &abci.ResponseEndBlock{},
}
// create a new state store with DiscardABCIResponses enabled.
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: true,
})
// save an additional response.
err := stateStore.SaveABCIResponses(height+1, response2)
require.NoError(t, err)
// check that the response was saved by loading the last response.
lastResponse2, err := stateStore.LoadLastABCIResponse(height + 1)
require.NoError(t, err)
// check that the loaded response matches the one that was saved.
assert.Equal(t, response2, lastResponse2)
// should error as we are no longer saving the response.
_, err = stateStore.LoadABCIResponses(height + 1)
assert.Equal(t, sm.ErrABCIResponsesNotPersisted, err)
})
}

View File

@@ -33,7 +33,9 @@ func TestTxFilter(t *testing.T) {
for i, tc := range testCases {
stateDB, err := dbm.NewDB("state", "memdb", os.TempDir())
require.NoError(t, err)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)

View File

@@ -28,7 +28,9 @@ func TestValidateBlockHeader(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, privVals := makeState(3, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
@@ -99,7 +101,9 @@ func TestValidateBlockCommit(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
@@ -213,7 +217,9 @@ func TestValidateBlockEvidence(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
state, stateDB, privVals := makeState(4, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
evpool := &mocks.EvidencePool{}

View File

@@ -325,9 +325,9 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) {
// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db.
// blockParts: Must be parts of the block
// seenCommit: The +2/3 precommits that were seen which committed at height.
// If all the nodes restart after committing a block,
// we need this to reload the precommits to catch-up nodes to the
// most recent height. Otherwise they'd stall at H-1.
// If all the nodes restart after committing a block,
// we need this to reload the precommits to catch-up nodes to the
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
if block == nil {
panic("BlockStore can only save a non-nil block")

View File

@@ -60,7 +60,9 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
@@ -369,7 +371,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
func TestLoadBaseMeta(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
stateStore := sm.NewStore(dbm.NewMemDB())
stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
bs := NewBlockStore(dbm.NewMemDB())
@@ -425,7 +429,9 @@ func TestLoadBlockPart(t *testing.T) {
func TestPruneBlocks(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
stateStore := sm.NewStore(dbm.NewMemDB())
stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
db := dbm.NewMemDB()

View File

@@ -1,4 +1,4 @@
//nolint: gosec
// nolint:gosec
package app
import (

View File

@@ -200,7 +200,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID {
var nilBytes []byte
// This follows RFC-6962, i.e. `echo -n '' | sha256sum`
// This follows RFC-6962, i.e. `echo -n ''| sha256sum`
var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8,
0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b,
0x78, 0x52, 0xb8, 0x55}

View File

@@ -411,15 +411,14 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e
//
// Inputs:
// updates - a list of proper validator changes, i.e. they have been verified by processChanges for duplicates
// and invalid values.
// and invalid values.
// vals - the original validator set. Note that vals is NOT modified by this function.
// removedPower - the total voting power that will be removed after the updates are verified and applied.
//
// Returns:
// tvpAfterUpdatesBeforeRemovals - the new total voting power if these updates would be applied without the removals.
// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and
// validators are added/ updated with high power values.
//
// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and
// validators are added/ updated with high power values.
// err - non-nil if the maximum allowed total voting power would be exceeded
func verifyUpdates(
updates []*Validator,
@@ -467,9 +466,9 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int {
// 'updates' parameter must be a list of unique validators to be added or updated.
//
// 'updatedTotalVotingPower' is the total voting power of a set where all updates would be applied but
// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to
// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8.
//
// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to
// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8.
// No changes are made to the validator set 'vals'.
func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) {
for _, valUpdate := range updates {
@@ -638,14 +637,14 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes
// UpdateWithChangeSet attempts to update the validator set with 'changes'.
// It performs the following steps:
// - validates the changes making sure there are no duplicates and splits them in updates and deletes
// - verifies that applying the changes will not result in errors
// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities
// across old and newly added validators are fair
// - computes the priorities of new validators against the final set
// - applies the updates against the validator set
// - applies the removals against the validator set
// - performs scaling and centering of priority values
// - validates the changes making sure there are no duplicates and splits them in updates and deletes
// - verifies that applying the changes will not result in errors
// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities
// across old and newly added validators are fair
// - computes the priorities of new validators against the final set
// - applies the updates against the validator set
// - applies the removals against the validator set
// - performs scaling and centering of priority values
// If an error is detected during verification steps, it is returned and the validator set
// is not changed.
func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
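For context, a brief usage sketch of this API as seen from within the types package; the keys and voting powers are invented for illustration:
// Build a two-validator set, then apply a change set that updates one
// validator's power and removes the other (voting power 0 means removal).
pk1, pk2 := ed25519.GenPrivKey().PubKey(), ed25519.GenPrivKey().PubKey()
vals := NewValidatorSet([]*Validator{
	NewValidator(pk1, 10),
	NewValidator(pk2, 20),
})
changes := []*Validator{
	NewValidator(pk1, 30), // update
	NewValidator(pk2, 0),  // removal
}
if err := vals.UpdateWithChangeSet(changes); err != nil {
	// duplicates, overflow, or removing an unknown validator land here
	panic(err)
}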

View File

@@ -44,7 +44,7 @@ type P2PID string
`.votesByBlock` keeps track of a list of votes for a particular block. There
are two ways a &blockVotes{} gets created in `.votesByBlock`.
1. the first vote seen by a validator was for the particular block.
2. a peer claims to have seen 2/3 majority for the particular block.
2. a peer claims to have seen 2/3 majority for the particular block..
Since the first vote from a validator will always get added in `.votesByBlock`
, all votes in `.votes` will have a corresponding entry in `.votesByBlock`.