Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-15 01:02:50 +00:00

Compare commits — 7 commits: wb/loadtim ... removing-f
| Author | SHA1 | Date |
|---|---|---|
| | 5c624eae9d | |
| | 849fd79df8 | |
| | 3f34cafd33 | |
| | 5e31b29e32 | |
| | ecc2b7baca | |
| | dab1abe078 | |
| | 4be78e3125 | |
@@ -2,7 +2,7 @@ linters:
enable:
- asciicheck
- bodyclose
- deadcode
# - deadcode
- depguard
- dogsled
- dupl
@@ -18,15 +18,17 @@ linters:
- ineffassign
- misspell
- nakedret
- nolintlint
# - nolintlint
- prealloc
- staticcheck
# - structcheck // to be fixed by golangci-lint
- structcheck
- stylecheck
- typecheck
- unconvert
disable:
- unused
- varcheck
- deadcode
- nolintlint

issues:
exclude-rules:
@@ -40,7 +42,5 @@ linters-settings:
max-blank-identifiers: 3
golint:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US

@@ -31,6 +31,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

### IMPROVEMENTS

- [config] \#9054 Flag added to overwrite abciresponses.

### BUG FIXES

[docker] \#9073 enable cross platform build using docker buildx
@@ -162,7 +162,7 @@ func TestValUpdates(t *testing.T) {

makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)

vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)

@@ -2,9 +2,8 @@
Package server is used to start a new ABCI server.

It contains two server implementation:
* gRPC server
* socket server

- gRPC server
- socket server
*/
package server
@@ -79,7 +79,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
}
}

// nolint:lll // ignore line length in tests
//nolint:lll // ignore line length in tests
func TestBlockchainMessageVectors(t *testing.T) {
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
block.Version.Block = 11 // overwrite updated protocol version

@@ -410,7 +410,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
}

// for debugging purposes
//nolint:unused

func (pool *BlockPool) debug() string {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -303,7 +303,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {

case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
go bcR.BroadcastStatusRequest() //nolint: errcheck

}
}
@@ -70,7 +70,9 @@ func newReactor(

blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(blockDB)

state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
@@ -83,7 +85,9 @@ func newReactor(
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
@@ -104,7 +104,7 @@ func killProc(pid uint64, dir string) error {
// pipe STDERR output from tailing the Tendermint process to a file
//
// NOTE: This will only work on UNIX systems.
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec

outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
if err != nil {

@@ -67,7 +67,7 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

resp, err := http.Get(endpoint) // nolint: gosec
resp, err := http.Get(endpoint) //nolint: gosec
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
}
@@ -40,6 +40,9 @@ replace the backend. The default start-height is 0, meaning the tooling will sta
reindex from the base block height(inclusive); and the default end-height is 0, meaning
the tooling will reindex until the latest block height(inclusive). User can omit
either or both arguments.

Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
want to use this command.
`,
Example: `
tendermint reindex-event

@@ -77,7 +77,9 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
stateStore := state.NewStore(stateDB, state.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})

return blockStore, stateStore, nil
}
@@ -77,6 +77,7 @@ type Config struct {
// https://github.com/tendermint/tendermint/issues/9279
DeprecatedFastSyncConfig map[interface{}]interface{} `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
Storage *StorageConfig `mapstructure:"storage"`
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
}
@@ -91,6 +92,7 @@ func DefaultConfig() *Config {
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
Consensus: DefaultConsensusConfig(),
Storage: DefaultStorageConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
}
@@ -106,6 +108,7 @@ func TestConfig() *Config {
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
Consensus: TestConsensusConfig(),
Storage: TestStorageConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
}
@@ -1087,11 +1090,40 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
}

//-----------------------------------------------------------------------------
// StorageConfig

// StorageConfig allows more fine-grained control over certain storage-related
// behavior.
type StorageConfig struct {
// Set to false to ensure ABCI responses are persisted. ABCI responses are
// required for `/block_results` RPC queries, and to reindex events in the
// command-line tool.
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
}

// DefaultStorageConfig returns the default configuration options relating to
// Tendermint storage optimization.
func DefaultStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}

// TestStorageConfig returns storage configuration that can be used for
// testing.
func TestStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}

// TxIndexConfig
// Remember that Event has the following structure:
// type: [
// key: value,
// ...
// ...
// key: value,
// ...
//
// ]
//
// CompositeKeys are constructed by `type.key`
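The hunks above introduce a StorageConfig section and extend sm.NewStore to take a StoreOptions value. A minimal sketch of how a caller wires the new option through from the node config, mirroring the updated call sites elsewhere in this change (import paths and the cfg/sm/dbm aliases are assumed from imports visible in this diff, not quoted from it):

package main

import (
	cfg "github.com/tendermint/tendermint/config"
	sm "github.com/tendermint/tendermint/state"
	dbm "github.com/tendermint/tm-db"
)

// newStateStore builds a state store whose ABCI-response retention follows the
// node's storage config. DiscardABCIResponses defaults to false (see
// DefaultStorageConfig above), so /block_results queries and
// `tendermint reindex-event` keep working unless the operator opts in to the
// disk-space saving.
func newStateStore(conf *cfg.Config, stateDB dbm.DB) sm.Store {
	return sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: conf.Storage.DiscardABCIResponses,
	})
}

func main() {
	store := newStateStore(cfg.DefaultConfig(), dbm.NewMemDB())
	_ = store
}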
@@ -140,8 +140,8 @@ func TestBlockSyncConfigValidateBasic(t *testing.T) {
assert.Error(t, cfg.ValidateBasic())
}

// nolint: lll
func TestConsensusConfig_ValidateBasic(t *testing.T) {
// nolint: lll
testcases := map[string]struct {
modify func(*ConsensusConfig)
expectErr bool
@@ -166,6 +166,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
}

for desc, tc := range testcases {
tc := tc // appease linter
t.Run(desc, func(t *testing.T) {
@@ -482,6 +482,16 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"

#######################################################
###         Storage Configuration Options          ###
#######################################################

# Set to true to discard ABCI responses from the state store, which can save a
# considerable amount of disk space. Set to false to ensure ABCI responses are
# persisted. ABCI responses are required for /block_results RPC queries, and to
# reindex events in the command-line tool.
discard_abci_responses = {{ .Storage.DiscardABCIResponses}}

#######################################################
###   Transaction Indexer Configuration Options    ###
#######################################################
@@ -50,7 +50,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i := 0; i < nValidators; i++ {
logger := consensusLogger().With("test", "byzantine", "validator", i)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -426,7 +426,9 @@ func newStateWithConfigAndBlockStore(

// Make State
stateDB := blockDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
panic(err)
}
@@ -716,7 +718,9 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -754,7 +758,9 @@ func randConsensusNetWithPeers(
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -314,7 +314,6 @@ func TestWALMsgProto(t *testing.T) {
}
}

// nolint:lll //ignore line length for tests
func TestConsMsgsVectors(t *testing.T) {
date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC)
psh := types.PartSetHeader{

@@ -138,7 +138,9 @@ func TestReactorWithEvidence(t *testing.T) {
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -689,7 +691,7 @@ func capture() {
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
testCases := []struct { // nolint: maligned
testCases := []struct {
expectErr bool
messageRound int32
messageLastCommitRound int32
@@ -728,7 +730,7 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) {

func TestNewRoundStepMessageValidateHeight(t *testing.T) {
initialHeight := int64(10)
testCases := []struct { // nolint: maligned
testCases := []struct { //nolint: maligned
expectErr bool
messageLastCommitRound int32
messageHeight int64
@@ -878,7 +880,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) {
invalidSignedMsgType tmproto.SignedMsgType = 0x03
)

testCases := []struct { // nolint: maligned
testCases := []struct { //nolint: maligned
expectErr bool
messageRound int32
messageIndex int32
@@ -923,7 +925,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
},
}

testCases := []struct { // nolint: maligned
testCases := []struct { //nolint: maligned
expectErr bool
messageRound int32
messageHeight int64
@@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks(

case appBlockHeight == storeBlockHeight:
// We ran Commit, but didn't save the state, so replayBlock with mock app.
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight)
if err != nil {
return nil, err
}

@@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
if err != nil {
tmos.Exit(err.Error())
}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
if err != nil {
tmos.Exit(err.Error())
@@ -158,7 +158,9 @@ LOOP:
logger := log.NewNopLogger()
blockDB := dbm.NewMemDB()
stateDB := blockDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -289,7 +291,6 @@ func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error { return w.next.Stop() }
func (w *crashingWAL) Wait() { w.next.Wait() }

//------------------------------------------------------------------------------------------
type testSim struct {
GenesisState sm.State
Config *cfg.Config
@@ -692,7 +693,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)

}
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
store.chain = chain
store.commits = commits

@@ -711,7 +714,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1)
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
DiscardABCIResponses: false,
})
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
@@ -890,7 +895,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, appVersion)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
@@ -1147,7 +1154,9 @@ func stateAndStore(
pubKey crypto.PubKey,
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
store := newMockBlockStore(config, state.ConsensusParams)
@@ -1224,7 +1233,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, 0x0)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

oldValAddr := state.Validators.Validators[0].Address
@@ -978,7 +978,9 @@ func (cs *State) handleTxsAvailable() {
// Used internally by handleTimeout and handleMsg to make state transitions

// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
//
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
//
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
@@ -1060,7 +1062,9 @@ func (cs *State) needProofBlock(height int64) bool {

// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
//
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
//
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *State) enterPropose(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
@@ -1964,7 +1968,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
// If the vote height is off, we'll just ignore it,
// But if it's a conflicting sig, add it to the cs.evpool.
// If it's otherwise invalid, punish peer.
// nolint: gocritic
//nolint: gocritic
if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
if cs.privValidatorPubKey == nil {
return false, errPubKeyIsNotSet
@@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)
@@ -5,7 +5,7 @@ import (
"fmt"
"io"

"golang.org/x/crypto/openpgp/armor" // nolint: staticcheck
"golang.org/x/crypto/openpgp/armor"
)

func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
@@ -12,20 +12,19 @@ second pre-image attacks. Hence, use this library with caution.
Otherwise you might run into similar issues as, e.g., in early Bitcoin:
https://bitcointalk.org/?topic=102395

[ASCII diagram of the Merkle tree (inner nodes *, leaf hashes h0–h5 plus node h6) appears
here twice, as the old and re-indented versions; its spacing did not survive extraction.]

TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.

*/
package merkle
@@ -85,8 +85,8 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {

bz := new(bytes.Buffer)
// Wrap <op.Key, vhash> to hash the KVPair.
encodeByteSlice(bz, op.key) // nolint: errcheck // does not error
encodeByteSlice(bz, vhash)  // nolint: errcheck // does not error
encodeByteSlice(bz, op.key)
encodeByteSlice(bz, vhash)
kvhash := leafHash(bz.Bytes())

if !bytes.Equal(kvhash, op.Proof.LeafHash) {

@@ -47,10 +47,10 @@ func HashFromByteSlices(items [][]byte) []byte {
//
// These preliminary results suggest:
//
// 1. The performance of the HashFromByteSlice is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the HashFromByteSlice routine is dominated
// by the actual hashing of data
// 1. The performance of the HashFromByteSlice is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the HashFromByteSlice routine is dominated
// by the actual hashing of data
//
// Although this work is in no way exhaustive, point #3 suggests that
// optimization of this routine would need to take an alternative
@@ -9,13 +9,12 @@ import (
"math/big"

secp256k1 "github.com/btcsuite/btcd/btcec"
"golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format

"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"
)

//-------------------------------------
const (
PrivKeyName = "tendermint/PrivKeySecp256k1"
PubKeyName = "tendermint/PubKeySecp256k1"
@@ -124,8 +123,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {

// used to reject malleable signatures
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)

// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
@@ -3,7 +3,7 @@ Package evidence handles all evidence storage and gossiping from detection to bl
For the different types of evidence refer to the `evidence.go` file in the types package
or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.

Gossiping
# Gossiping

The core functionality begins with the evidence reactor (see reactor.
go) which operates both the sending and receiving of evidence.
@@ -29,7 +29,7 @@ There are two buckets that evidence can be stored in: Pending & Committed.

All evidence is proto encoded to disk.

Proposing
# Proposing

When a new block is being proposed (in state/execution.go#CreateProposalBlock),
`PendingEvidence(maxBytes)` is called to send up to the maxBytes of uncommitted evidence, from the evidence store,
@@ -42,12 +42,11 @@ Once the proposed evidence is submitted,
the evidence is marked as committed and is moved from the broadcasted set to the committed set.
As a result it is also removed from the concurrent list so that it is no longer gossiped.

Minor Functionality
# Minor Functionality

As all evidence (including POLC's) are bounded by an expiration date, those that exceed this are no longer needed
and hence pruned. Currently, only committed evidence in which a marker to the height that the evidence was committed
and hence very small is saved. All updates are made from the `Update(block, state)` function which should be called
when a new block is committed.

*/
package evidence
@@ -97,11 +97,11 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) {

// Update takes both the new state and the evidence committed at that height and performs
// the following operations:
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
// DuplicateVoteEvidence and add it to the pool.
// 2. Update the pool's state which contains evidence params relating to expiry.
// 3. Moves pending evidence that has now been committed into the committed pool.
// 4. Removes any expired evidence based on both height and time.
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
// DuplicateVoteEvidence and add it to the pool.
// 2. Update the pool's state which contains evidence params relating to expiry.
// 3. Moves pending evidence that has now been committed into the committed pool.
// 4. Removes any expired evidence based on both height and time.
func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) {
// sanity check
if state.LastBlockHeight <= evpool.state.LastBlockHeight {
@@ -348,7 +348,9 @@ func TestRecoverPendingEvidence(t *testing.T) {

func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state := sm.State{
ChainID: evidenceChainID,
InitialHeight: 1,

@@ -367,7 +367,6 @@ func exampleVote(t byte) *types.Vote {
}
}

// nolint:lll //ignore line length for tests
func TestEvidenceVectors(t *testing.T) {

val := &types.Validator{
@@ -102,13 +102,13 @@ func (evpool *Pool) verify(evidence types.Evidence) error {

// VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. This involves
// the following checks:
// - the common header from the full node has at least 1/3 voting power which is also present in
// the conflicting header's commit
// - 2/3+ of the conflicting validator set correctly signed the conflicting block
// - the nodes trusted header at the same height as the conflicting header has a different hash
//
// - the common header from the full node has at least 1/3 voting power which is also present in
// the conflicting header's commit
// - 2/3+ of the conflicting validator set correctly signed the conflicting block
// - the nodes trusted header at the same height as the conflicting header has a different hash

// CONTRACT: must run ValidateBasic() on the evidence before verifying
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,
commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {
// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single
@@ -154,10 +154,10 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t

// VerifyDuplicateVote verifies DuplicateVoteEvidence against the state of full node. This involves the
// following checks:
// - the validator is in the validator set at the height of the evidence
// - the height, round, type and validator address of the votes must be the same
// - the block ID's must be different
// - The signatures must both be valid
// - the validator is in the validator set at the height of the evidence
// - the height, round, type and validator address of the votes must be the same
// - the block ID's must be different
// - The signatures must both be valid
func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error {
_, val := valSet.GetByAddress(e.VoteA.ValidatorAddress)
if val == nil {
go.sum
@@ -1031,6 +1031,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -17,7 +17,8 @@ const (
// all other modules).
//
// Example:
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
//
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) {
if lvl == "" {
return nil, errors.New("empty log level")
@@ -24,7 +24,6 @@ import (
const MaxLength = int(^uint(0) >> 1)

/*

CElement is an element of a linked-list
Traversal from a CElement is goroutine-safe.

@@ -41,7 +40,6 @@ the for-loop. Use sync.Cond when you need serial access to the
"condition". In our case our condition is if `next != nil || removed`,
and there's no reason to serialize that condition for goroutines
waiting on NextWait() (since it's just a read operation).

*/
type CElement struct {
mtx tmsync.RWMutex
@@ -68,7 +68,6 @@ func TestSmall(t *testing.T) {

// This test is quite hacky because it relies on SetFinalizer
// which isn't guaranteed to run at all.
//nolint:unused,deadcode
func _TestGCFifo(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skipf("Skipping on non-amd64 machine")
@@ -117,6 +116,7 @@ func _TestGCFifo(t *testing.T) {

// This test is quite hacky because it relies on SetFinalizer
// which isn't guaranteed to run at all.
//
//nolint:unused,deadcode
func _TestGCRandom(t *testing.T) {
if runtime.GOARCH != "amd64" {
@@ -39,10 +39,10 @@ type Monitor struct {
// weight of each sample in the exponential moving average (EMA) calculation.
// The exact formulas are:
//
// sampleTime = currentTime - prevSampleTime
// sampleRate = byteCount / sampleTime
// weight = 1 - exp(-sampleTime/windowSize)
// newRate = weight*sampleRate + (1-weight)*oldRate
// sampleTime = currentTime - prevSampleTime
// sampleRate = byteCount / sampleTime
// weight = 1 - exp(-sampleTime/windowSize)
// newRate = weight*sampleRate + (1-weight)*oldRate
//
// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
// respectively.
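The formulas quoted in this comment are self-contained enough to transcribe directly. A small illustrative program (not the flowrate package's actual implementation) that applies one EMA update exactly as described:

package main

import (
	"fmt"
	"math"
	"time"
)

// emaRate folds one sample (byteCount bytes observed over sampleTime) into
// oldRate, using the weighting formulas from the comment above.
func emaRate(oldRate float64, byteCount int64, sampleTime, windowSize time.Duration) float64 {
	sampleRate := float64(byteCount) / sampleTime.Seconds()
	weight := 1 - math.Exp(-sampleTime.Seconds()/windowSize.Seconds())
	return weight*sampleRate + (1-weight)*oldRate
}

func main() {
	// 64 KiB observed in the last 100ms against a 1s window (the defaults noted above).
	fmt.Printf("%.0f B/s\n", emaRate(500_000, 64*1024, 100*time.Millisecond, time.Second))
}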
@@ -13,12 +13,12 @@
// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit
// precision):
//
// int32(32)  // Output: 32
// uint32(32) // Output: 32
// int64(64)  // Output: "64"
// uint64(64) // Output: "64"
// int(64)    // Output: "64"
// uint(64)   // Output: "64"
// int32(32)  // Output: 32
// uint32(32) // Output: 32
// int64(64)  // Output: "64"
// uint64(64) // Output: "64"
// int(64)    // Output: "64"
// uint(64)   // Output: "64"
//
// Encoding of other scalars follows encoding/json:
//
@@ -50,7 +50,7 @@
// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero
// times emitted as "0001-01-01T00:00:00Z" as with encoding/json):
//
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
// // Output: "2020-06-08T14:21:28.000000123Z"
// time.Time{}       // Output: "0001-01-01T00:00:00Z"
// (*time.Time)(nil) // Output: null
@@ -95,5 +95,4 @@
//
// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}}
// // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}}
//
package json
@@ -69,18 +69,19 @@ func (l *filter) Error(msg string, keyvals ...interface{}) {
// Allow*With methods, it is used as the logger's level.
//
// Examples:
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
//
// logger = log.NewFilter(logger, log.AllowError(),
// log.AllowInfoWith("module", "crypto"),
// log.AllowNoneWith("user", "Sam"))
// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
//
// logger = log.NewFilter(logger,
// log.AllowError(),
// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
// logger = log.NewFilter(logger, log.AllowError(),
// log.AllowInfoWith("module", "crypto"),
// log.AllowNoneWith("user", "Sam"))
// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil
//
// logger = log.NewFilter(logger,
// log.AllowError(),
// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
func (l *filter) With(keyvals ...interface{}) Logger {
keyInAllowedKeyvals := false
@@ -22,9 +22,9 @@ type Logger interface {
//
// If w implements the following interface, so does the returned writer.
//
// interface {
//     Fd() uintptr
// }
// interface {
//     Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
return kitlog.NewSyncWriter(w)
}
@@ -65,7 +65,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
switch keyvals[i] {
case kitlevel.Key():
excludeIndexes = append(excludeIndexes, i)
switch keyvals[i+1].(type) { // nolint:gocritic
switch keyvals[i+1].(type) { //nolint:gocritic
case string:
lvl = keyvals[i+1].(string)
case kitlevel.Value:

@@ -83,7 +83,6 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log
}
}

//nolint: errcheck // ignore errors
var (
baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") }
withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") }
@@ -12,26 +12,25 @@
//
// Example:
//
// q, err := query.New("account.name='John'")
// if err != nil {
//     return err
// }
// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second)
// defer cancel()
// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
// if err != nil {
//     return err
// }
//
// for {
//     select {
//     case msg <- subscription.Out():
//         // handle msg.Data() and msg.Events()
//     case <-subscription.Cancelled():
//         return subscription.Err()
//     }
// }
// q, err := query.New("account.name='John'")
// if err != nil {
//     return err
// }
// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second)
// defer cancel()
// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
// if err != nil {
//     return err
// }
//
// for {
//     select {
//     case msg <- subscription.Out():
//         // handle msg.Data() and msg.Events()
//     case <-subscription.Cancelled():
//         return subscription.Err()
//     }
// }
package pubsub

import (
@@ -1,6 +1,6 @@
// Package query provides a parser for a custom query format:
//
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
//
// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar.
// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics
@@ -43,7 +43,6 @@ func (s *Subscription) Out() <-chan Message {
return s.out
}

// nolint: misspell
// Cancelled returns a channel that's closed when the subscription is
// terminated and supposed to be used in a select statement.
func (s *Subscription) Cancelled() <-chan struct{} {
@@ -54,7 +53,8 @@ func (s *Subscription) Cancelled() <-chan struct{} {
// If the channel is closed, Err returns a non-nil error explaining why:
// - ErrUnsubscribed if the subscriber choose to unsubscribe,
// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough
// and the channel returned by Out became full,
// and the channel returned by Out became full,
//
// After Err returns a non-nil error, successive calls to Err return the same
// error.
func (s *Subscription) Err() error {
@@ -48,7 +48,7 @@ func (r *Rand) init() {
}

func (r *Rand) reset(seed int64) {
r.rand = mrand.New(mrand.NewSource(seed)) // nolint:gosec // G404: Use of weak random number generator
r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec // G404: Use of weak random number generator
}

//----------------------------------------
@@ -284,16 +284,16 @@ func (c *Client) restoreTrustedLightBlock() error {

// if options.Height:
//
// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as
// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as
// trustedLightBlock) from primary provider and check it's hash matches the
// trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks
// before)
//
// 2) equals trustedLightBlock.Height => check options.Hash matches the
// 2) equals trustedLightBlock.Height => check options.Hash matches the
// trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks
// before)
//
// 3) behind trustedLightBlock.Height => remove all the light blocks between
// 3) behind trustedLightBlock.Height => remove all the light blocks between
// options.Height and trustedLightBlock.Height, update trustedLightBlock, then
// check options.Hash matches the trustedLightBlock's hash (if not, remove
// trustedLightBlock and all the light blocks before)
@@ -395,10 +395,10 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp
// TrustedLightBlock returns a trusted light block at the given height (0 - the latest).
//
// It returns an error if:
// - there are some issues with the trusted store, although that should not
// happen normally;
// - negative height is passed;
// - header has not been verified yet and is therefore not in the store
// - there are some issues with the trusted store, although that should not
// happen normally;
// - negative height is passed;
// - header has not been verified yet and is therefore not in the store
//
// Safe for concurrent use by multiple goroutines.
func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) {
@@ -510,8 +510,9 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now
//
// If the header, which is older than the currently trusted header, is
// requested and the light client does not have it, VerifyHeader will perform:
// a) verifySkipping verification if nearest trusted header is found & not expired
// b) backwards verification in all other cases
//
// a) verifySkipping verification if nearest trusted header is found & not expired
// b) backwards verification in all other cases
//
// It returns ErrOldHeaderExpired if the latest trusted header expired.
//
@@ -980,12 +981,12 @@ func (c *Client) backwards(
// lightBlockFromPrimary retrieves the lightBlock from the primary provider
// at the specified height. This method also handles provider behavior as follows:
//
// 1. If the provider does not respond or does not have the block, it tries again
// with a different provider
// 2. If all providers return the same error, the light client forwards the error to
// where the initial request came from
// 3. If the provider provides an invalid light block, is deemed unreliable or returns
// any other error, the primary is permanently dropped and is replaced by a witness.
// 1. If the provider does not respond or does not have the block, it tries again
// with a different provider
// 2. If all providers return the same error, the light client forwards the error to
// where the initial request came from
// 3. If the provider provides an invalid light block, is deemed unreliable or returns
// any other error, the primary is permanently dropped and is replaced by a witness.
func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) {
c.providerMutex.Lock()
l, err := c.primary.LightBlock(ctx, height)
@@ -109,7 +109,9 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
//
// 1: errConflictingHeaders -> there may have been an attack on this light client
// 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one
// Note: In the case of an invalid header we remove the witness
//
// Note: In the case of an invalid header we remove the witness
//
// 3: nil -> the hashes of the two headers match
func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader,
witness provider.Provider, witnessIndex int) {
@@ -275,16 +277,16 @@ func (c *Client) handleConflictingHeaders(
// it has received from another and preforms verifySkipping at the heights of each of the intermediate
// headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen.
//
// 1. The light client verifies a header that is different to the intermediate header in the trace. This
// is the bifurcation point and the light client can create evidence from it
// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we
// return the error and remove the witness
// 1. The light client verifies a header that is different to the intermediate header in the trace. This
// is the bifurcation point and the light client can create evidence from it
// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we
// return the error and remove the witness
//
// CONTRACT:
// 1. Trace can not be empty len(trace) > 0
// 2. The last block in the trace can not be of a lower height than the target block
// trace[len(trace)-1].Height >= targetBlock.Height
// 3. The
// 1. Trace can not be empty len(trace) > 0
// 2. The last block in the trace can not be of a lower height than the target block
// trace[len(trace)-1].Height >= targetBlock.Height
// 3. The
func (c *Client) examineConflictingHeaderAgainstTrace(
ctx context.Context,
trace []*types.LightBlock,
light/doc.go
@@ -63,31 +63,31 @@ This package provides three major things:

Example usage:

db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
if err != nil {
	// handle error
}
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
if err != nil {
	// handle error
}

c, err := NewHTTPClient(
	chainID,
	TrustOptions{
		Period: 504 * time.Hour, // 21 days
		Height: 100,
		Hash:   header.Hash(),
	},
	"http://localhost:26657",
	[]string{"http://witness1:26657"},
	dbs.New(db, ""),
)
if err != nil {
	// handle error
}
c, err := NewHTTPClient(
	chainID,
	TrustOptions{
		Period: 504 * time.Hour, // 21 days
		Height: 100,
		Hash:   header.Hash(),
	},
	"http://localhost:26657",
	[]string{"http://witness1:26657"},
	dbs.New(db, ""),
)
if err != nil {
	// handle error
}

h, err := c.TrustedHeader(100)
if err != nil {
	// handle error
}
fmt.Println("header", h)
h, err := c.TrustedHeader(100)
if err != nil {
	// handle error
}
fmt.Println("header", h)

Check out other examples in example_test.go

@@ -216,6 +216,6 @@ func validateHeight(height int64) (*int64, error) {
// exponential backoff (with jitter)
// 0.5s -> 2s -> 4.5s -> 8s -> 12.5 with 1s variation
func backoffTimeout(attempt uint16) time.Duration {
// nolint:gosec // G404: Use of weak random number generator
//nolint:gosec // G404: Use of weak random number generator
return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond
}
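For reference, the backoff line shown above can be lifted into a standalone program to see the progression the comment describes (0.5s, 2s, 4.5s, 8s, ~12.5s, each with up to 1s of jitter). This is only an illustrative copy, not part of the change:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffTimeout is the quadratic-growth-plus-jitter formula from the diff above.
func backoffTimeout(attempt uint16) time.Duration {
	//nolint:gosec // G404: Use of weak random number generator
	return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond
}

func main() {
	for attempt := uint16(1); attempt <= 5; attempt++ {
		fmt.Println(attempt, backoffTimeout(attempt))
	}
}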
@@ -62,7 +62,7 @@ func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {

type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)

// nolint: interfacer
//nolint: interfacer
func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return c.Status(ctx.Context())
@@ -278,7 +278,7 @@ func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {

type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)

// nolint: interfacer
//nolint: interfacer
func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
return c.BroadcastEvidence(ctx.Context(), ev)
@@ -19,13 +19,13 @@ var (
// VerifyNonAdjacent verifies non-adjacent untrustedHeader against
// trustedHeader. It ensures that:
//
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals)
// signed correctly (if not, ErrNewValSetCantBeTrusted is returned)
// d) more than 2/3 of untrustedVals have signed h2
// (otherwise, ErrInvalidHeader is returned)
// e) headers are non-adjacent.
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals)
// signed correctly (if not, ErrNewValSetCantBeTrusted is returned)
// d) more than 2/3 of untrustedVals have signed h2
// (otherwise, ErrInvalidHeader is returned)
// e) headers are non-adjacent.
//
// maxClockDrift defines how much untrustedHeader.Time can drift into the
// future.
@@ -81,12 +81,12 @@ func VerifyNonAdjacent(
// VerifyAdjacent verifies directly adjacent untrustedHeader against
// trustedHeader. It ensures that:
//
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash
// d) more than 2/3 of new validators (untrustedVals) have signed h2
// (otherwise, ErrInvalidHeader is returned)
// e) headers are adjacent.
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash
// d) more than 2/3 of new validators (untrustedVals) have signed h2
// (otherwise, ErrInvalidHeader is returned)
// e) headers are adjacent.
//
// maxClockDrift defines how much untrustedHeader.Time can drift into the
// future.
@@ -212,12 +212,12 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time
// VerifyBackwards verifies an untrusted header with a height one less than
// that of an adjacent trusted header. It ensures that:
//
// a) untrusted header is valid
// b) untrusted header has a time before the trusted header
// c) that the LastBlockID hash of the trusted header is the same as the hash
// of the trusted header
// a) untrusted header is valid
// b) untrusted header has a time before the trusted header
// c) that the LastBlockID hash of the trusted header is the same as the hash
// of the trusted header
//
// For any of these cases ErrInvalidHeader is returned.
// For any of these cases ErrInvalidHeader is returned.
func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error {
if err := untrustedHeader.ValidateBasic(); err != nil {
return ErrInvalidHeader{err}
@@ -194,7 +194,9 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} {

// It blocks if we're waiting on Update() or Reap().
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
//
// It gets called from another goroutine.
//
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
@@ -310,7 +312,7 @@ func (mem *CListMempool) reqResCb(
}

// Called from:
// - resCbFirstTime (lock not held) if tx is valid
// - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
e := mem.txs.PushBack(memTx)
mem.txsMap.Store(memTx.tx.Key(), e)
@@ -319,8 +321,8 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
}

// Called from:
// - Update (lock held) if tx was committed
// - resCbRecheck (lock not held) if tx was invalidated
// - Update (lock held) if tx was committed
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
node/doc.go
@@ -6,35 +6,34 @@ Adding new p2p.Reactor(s)

To add a new p2p.Reactor, use the CustomReactors option:

node, err := NewNode(
	config,
	privVal,
	nodeKey,
	clientCreator,
	genesisDocProvider,
	dbProvider,
	metricsProvider,
	logger,
	CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}),
)
node, err := NewNode(
	config,
	privVal,
	nodeKey,
	clientCreator,
	genesisDocProvider,
	dbProvider,
	metricsProvider,
	logger,
	CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}),
)

Replacing existing p2p.Reactor(s)

To replace the built-in p2p.Reactor, use the CustomReactors option:

node, err := NewNode(
	config,
	privVal,
	nodeKey,
	clientCreator,
	genesisDocProvider,
	dbProvider,
	metricsProvider,
	logger,
	CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}),
)
node, err := NewNode(
	config,
	privVal,
	nodeKey,
	clientCreator,
	genesisDocProvider,
	dbProvider,
	metricsProvider,
	logger,
	CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}),
)

The list of existing reactors can be found in CustomReactors documentation.

*/
package node
node/node.go
@@ -51,7 +51,7 @@ import (
	tmtime "github.com/tendermint/tendermint/types/time"
	"github.com/tendermint/tendermint/version"

	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port

	_ "github.com/lib/pq" // provide the psql db driver
)
@@ -145,12 +145,12 @@ type blockSyncReactor interface {
// WARNING: using any name from the below list of the existing reactors will
// result in replacing it with the custom one.
//
//   - MEMPOOL
//   - BLOCKCHAIN
//   - CONSENSUS
//   - EVIDENCE
//   - PEX
//   - STATESYNC
func CustomReactors(reactors map[string]p2p.Reactor) Option {
	return func(n *Node) {
		for name, reactor := range reactors {
@@ -430,7 +430,9 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
		return nil, nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
	}), blockStore)
	if err != nil {
		return nil, nil, err
	}
@@ -714,7 +716,9 @@ func NewNode(config *cfg.Config,
		return nil, err
	}

	stateStore := sm.NewStore(stateDB)
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
	})

	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
	if err != nil {
@@ -1390,7 +1394,9 @@ func LoadStateFromDBOrGenesisDocProvider(
			return sm.State{}, nil, err
		}
	}
	stateStore := sm.NewStore(stateDB)
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		return sm.State{}, nil, err
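The same pattern applies wherever a state store is constructed by hand. A minimal sketch (import aliases assumed, as commonly used in this repository) of opening a store that honours the operator's storage setting:

	import (
		dbm "github.com/tendermint/tm-db"

		cfg "github.com/tendermint/tendermint/config"
		sm "github.com/tendermint/tendermint/state"
	)

	func openStateStore(config *cfg.Config, stateDB dbm.DB) sm.Store {
		// Discarding ABCI responses trades replayability of historical block
		// results for a smaller state database.
		return sm.NewStore(stateDB, sm.StoreOptions{
			DiscardABCIResponses: config.Storage.DiscardABCIResponses,
		})
	}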
@@ -235,7 +235,9 @@ func TestCreateProposalBlock(t *testing.T) {
|
||||
|
||||
var height int64 = 1
|
||||
state, stateDB, privVals := state(1, height)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
maxBytes := 16384
|
||||
var partSize uint32 = 256
|
||||
maxEvidenceBytes := int64(maxBytes / 2)
|
||||
@@ -340,7 +342,9 @@ func TestMaxProposalBlockSize(t *testing.T) {
|
||||
|
||||
var height int64 = 1
|
||||
state, stateDB, _ := state(1, height)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
var maxBytes int64 = 16384
|
||||
var partSize uint32 = 256
|
||||
state.ConsensusParams.Block.MaxBytes = maxBytes
|
||||
@@ -464,7 +468,9 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
|
||||
|
||||
// save validators to db for 2 heights
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
if err := stateStore.Save(s); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -10,9 +10,13 @@ import (

// Only Go1.10 has a proper net.Conn implementation that
// has the SetDeadline method implemented as per
//
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
//
// lest we run into problems like
// https://github.com/tendermint/tendermint/issues/851
//
// so for go versions < Go1.10 use our custom net.Conn creator
// that doesn't return an `Unimplemented error` for net.Conn.
// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04
@@ -62,6 +62,7 @@ The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.

There are two methods for sending messages:

	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
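The difference between the two, sketched with an assumed, already-started mconn and an example channel ID:

	msg := []byte("hello")
	if !mconn.Send(0x01, msg) { // blocks until the message is queued; false if the channel is unknown or the connection is stopping
		// handle failure
	}
	if ok := mconn.TrySend(0x01, msg); !ok { // non-blocking: false when the channel's send queue is full
		// drop the message or retry later
	}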
@@ -51,7 +51,7 @@ func TestMConnectionSendFlushStop(t *testing.T) {
|
||||
clientConn := createTestMConnection(client)
|
||||
err := clientConn.Start()
|
||||
require.Nil(t, err)
|
||||
defer clientConn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer clientConn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("abc")
|
||||
assert.True(t, clientConn.Send(0x01, msg))
|
||||
@@ -89,7 +89,7 @@ func TestMConnectionSend(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Ant-Man")
|
||||
assert.True(t, mconn.Send(0x01, msg))
|
||||
@@ -128,12 +128,12 @@ func TestMConnectionReceive(t *testing.T) {
|
||||
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn1.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn1.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn1.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
mconn2 := createTestMConnection(server)
|
||||
err = mconn2.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn2.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn2.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Cyclops")
|
||||
assert.True(t, mconn2.Send(0x01, msg))
|
||||
@@ -156,7 +156,7 @@ func TestMConnectionStatus(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
status := mconn.Status()
|
||||
assert.NotNil(t, status)
|
||||
@@ -179,7 +179,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
serverGotPing := make(chan struct{})
|
||||
go func() {
|
||||
@@ -218,7 +218,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// sending 3 pongs in a row (abuse)
|
||||
protoWriter := protoio.NewDelimitedWriter(server)
|
||||
@@ -273,7 +273,7 @@ func TestMConnectionMultiplePings(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// sending 3 pings in a row (abuse)
|
||||
// see https://github.com/tendermint/tendermint/issues/1190
|
||||
@@ -322,7 +322,7 @@ func TestMConnectionPingPongs(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
serverGotPing := make(chan struct{})
|
||||
go func() {
|
||||
@@ -380,7 +380,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
if err := client.Close(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -492,8 +492,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
|
||||
chOnRcv := make(chan struct{})
|
||||
|
||||
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
|
||||
defer mconnClient.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnClient.Stop() //nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
mconnServer.onReceive = func(chID byte, msgBytes []byte) {
|
||||
chOnRcv <- struct{}{}
|
||||
@@ -528,8 +528,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
|
||||
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
|
||||
chOnErr := make(chan struct{})
|
||||
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
|
||||
defer mconnClient.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnClient.Stop() //nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// send msg with unknown msg type
|
||||
_, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"})
|
||||
@@ -545,7 +545,7 @@ func TestMConnectionTrySend(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Semicolon-Woman")
|
||||
resultCh := make(chan string, 2)
|
||||
@@ -564,7 +564,7 @@ func TestMConnectionTrySend(t *testing.T) {
|
||||
assert.Equal(t, "TrySend", <-resultCh)
|
||||
}
|
||||
|
||||
// nolint:lll //ignore line length for tests
|
||||
//nolint:lll //ignore line length for tests
|
||||
func TestConnVectors(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
|
||||
@@ -103,7 +103,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
|
||||
|
||||
func (fc *FuzzedConnection) randomDuration() time.Duration {
|
||||
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
|
||||
return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) // nolint: gas
|
||||
return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) //nolint: gas
|
||||
}
|
||||
|
||||
// implements the fuzz (delay, kill conn)
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestNodeInfoValidate(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
"Too Many Channels",
|
||||
func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, // nolint: gocritic
|
||||
func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, //nolint: gocritic
|
||||
true,
|
||||
},
|
||||
{"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true},
|
||||
|
||||
@@ -94,17 +94,16 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
}

/*
An address is bad if the address in question is a New address, has not been tried in the last
minute, and meets one of the following criteria:

 1) It claims to be from the future
 2) It hasn't been seen in over a week
 3) It has failed at least three times and never succeeded
 4) It has failed ten times in the last week

All addresses that meet these criteria are assumed to be worthless and not
worth keeping hold of.
*/
func (ka *knownAddress) isBad() bool {
	// Is Old --> good
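Purely for illustration, the four criteria translate roughly into checks like the following; the field names (LastAttempt, LastSuccess, Attempts) are assumptions based on the prose above, not a verbatim copy of the address book implementation:

	func (ka *knownAddress) looksBad(now time.Time) bool {
		// Tried in the last minute: too early to judge.
		if ka.LastAttempt.After(now.Add(-1 * time.Minute)) {
			return false
		}
		switch {
		case ka.LastAttempt.After(now): // 1) claims to be from the future
			return true
		case ka.LastAttempt.Before(now.Add(-7 * 24 * time.Hour)): // 2) not seen in over a week
			return true
		case ka.LastSuccess.IsZero() && ka.Attempts >= 3: // 3) failed at least three times, never succeeded
			return true
		case ka.Attempts >= 10 && ka.LastSuccess.Before(now.Add(-7*24*time.Hour)): // 4) roughly: ten failures within the last week
			return true
		}
		return false
	}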
@@ -58,15 +58,15 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
}

// --- FAIL: TestPEXReactorRunning (11.10s)
// pex_reactor_test.go:411: expected all switches to be connected to at
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
//
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
// if any peer (who we already connected to) has the same IP. Even though local
// peers have different IP addresses, they all have the same underlying remote
// IP: 127.0.0.1.
func TestPEXReactorRunning(t *testing.T) {
	N := 3
	switches := make([]*p2p.Switch, N)
@@ -214,7 +214,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
// 1. test creating peer with no seeds works
|
||||
peerSwitch := testCreateDefaultPeer(dir, 0)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. create seed
|
||||
seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{})
|
||||
@@ -222,7 +222,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
// 3. test create peer with online seed works
|
||||
peerSwitch = testCreatePeerWithSeed(dir, 2, seed)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 4. test create peer with all seeds having unresolvable DNS fails
|
||||
badPeerConfig := &ReactorConfig{
|
||||
@@ -231,7 +231,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Error(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 5. test create peer with one good seed address succeeds
|
||||
badPeerConfig = &ReactorConfig{
|
||||
@@ -241,7 +241,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
}
|
||||
|
||||
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
@@ -253,12 +253,12 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
// 1. create seed
|
||||
seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{})
|
||||
require.Nil(t, seed.Start())
|
||||
defer seed.Stop() // nolint:errcheck // ignore for tests
|
||||
defer seed.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. create usual peer with only seed configured.
|
||||
peer := testCreatePeerWithSeed(dir, 1, seed)
|
||||
require.Nil(t, peer.Start())
|
||||
defer peer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 3. check that the peer connects to seed immediately
|
||||
assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1)
|
||||
@@ -273,18 +273,18 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
|
||||
// 1. create peer
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. Create seed which knows about the peer
|
||||
peerAddr := peerSwitch.NetAddress()
|
||||
seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr})
|
||||
require.Nil(t, seed.Start())
|
||||
defer seed.Stop() // nolint:errcheck // ignore for tests
|
||||
defer seed.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 3. create another peer with only seed configured.
|
||||
secondPeer := testCreatePeerWithSeed(dir, 3, seed)
|
||||
require.Nil(t, secondPeer.Start())
|
||||
defer secondPeer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer secondPeer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 4. check that the second peer connects to seed immediately
|
||||
assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1)
|
||||
@@ -307,13 +307,13 @@ func TestPEXReactorSeedMode(t *testing.T) {
|
||||
sw.SetAddrBook(book)
|
||||
err = sw.Start()
|
||||
require.NoError(t, err)
|
||||
defer sw.Stop() // nolint:errcheck // ignore for tests
|
||||
defer sw.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
assert.Zero(t, sw.Peers().Size())
|
||||
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.NoError(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 1. Test crawlPeers dials the peer
|
||||
pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()})
|
||||
@@ -346,13 +346,13 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
|
||||
sw.SetAddrBook(book)
|
||||
err = sw.Start()
|
||||
require.NoError(t, err)
|
||||
defer sw.Stop() // nolint:errcheck // ignore for tests
|
||||
defer sw.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
assert.Zero(t, sw.Peers().Size())
|
||||
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.NoError(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})
|
||||
require.NoError(t, err)
|
||||
@@ -618,7 +618,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
for j := 0; j < len(knownAddrs); j++ {
|
||||
book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests
|
||||
book.AddAddress(knownAddrs[j], srcAddrs[j]) //nolint:errcheck // ignore for tests
|
||||
book.MarkGood(knownAddrs[j].ID)
|
||||
}
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
@@ -379,8 +379,8 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
// to the PEX/Addrbook to find the peer with the addr again
// NOTE: this will keep trying even if the handshake or auth fails.
// TODO: be more explicit with error types so we only retry on certain failures
//   - ie. if we're getting ErrDuplicatePeer we can stop
//     because the addrbook got us the peer back already
func (sw *Switch) reconnectToPeer(addr *NetAddress) {
	if sw.reconnecting.Has(string(addr.ID)) {
		return
@@ -529,8 +529,8 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
	}

	_, err := mt.Accept(peerConfig{})
	if err, ok := err.(ErrRejected); ok {
		if !err.IsSelf() {
	if e, ok := err.(ErrRejected); ok {
		if !e.IsSelf() {
			t.Errorf("expected to reject self, got: %v", err)
		}
	} else {
@@ -72,6 +72,7 @@ func TestTrustMetricCopyNilPointer(t *testing.T) {
|
||||
}
|
||||
|
||||
// XXX: This test fails non-deterministically
|
||||
//
|
||||
//nolint:unused,deadcode
|
||||
func _TestTrustMetricStopPause(t *testing.T) {
|
||||
// The TestTicker will provide manual control over
|
||||
|
||||
@@ -202,7 +202,7 @@ func localIPv4() (net.IP, error) {
|
||||
}
|
||||
|
||||
func getServiceURL(rootURL string) (url, urnDomain string, err error) {
|
||||
r, err := http.Get(rootURL) // nolint: gosec
|
||||
r, err := http.Get(rootURL) //nolint: gosec
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
/*

Package privval provides different implementations of the types.PrivValidator.

FilePV
# FilePV

FilePV is the simplest implementation and developer default.
It uses one file for the private key and another to store state.

SignerListenerEndpoint
# SignerListenerEndpoint

SignerListenerEndpoint establishes a connection to an external process,
like a Key Management Server (KMS), using a socket.
@@ -15,15 +14,14 @@ SignerListenerEndpoint listens for the external KMS process to dial in.
SignerListenerEndpoint takes a listener, which determines the type of connection
(ie. encrypted over tcp, or unencrypted over unix).

SignerDialerEndpoint
# SignerDialerEndpoint

SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal.

SignerClient
# SignerClient

SignerClient handles remote validator connections that provide signing services.
In production, it's recommended to wrap it with RetrySignerClient to avoid
termination in case of temporary errors.
*/
package privval
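A short sketch of the developer-default FilePV described above; the file paths are placeholders:

	package main

	import (
		"fmt"

		"github.com/tendermint/tendermint/privval"
	)

	func main() {
		// One file holds the private key, the other the last-signed state,
		// mirroring the split described in the package documentation.
		pv := privval.LoadOrGenFilePV("config/priv_validator_key.json", "data/priv_validator_state.json")
		fmt.Println("validator address:", pv.GetAddress())
	}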
@@ -57,7 +57,6 @@ func exampleProposal() *types.Proposal {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll // ignore line length for tests
|
||||
func TestPrivvalVectors(t *testing.T) {
|
||||
pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey()
|
||||
ppk, err := cryptoenc.PubKeyToProto(pk)
|
||||
|
||||
@@ -75,10 +75,10 @@ message RequestQuery {
|
||||
}
|
||||
|
||||
message RequestBeginBlock {
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
enum CheckTxType {
|
||||
@@ -234,7 +234,7 @@ message ResponseDeliverTx {
|
||||
}
|
||||
|
||||
message ResponseEndBlock {
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
ConsensusParams consensus_param_updates = 2;
|
||||
repeated Event events = 3
|
||||
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
|
||||
@@ -104,7 +104,7 @@ func (m *NewRoundStep) GetLastCommitRound() int32 {
}

// NewValidBlock is sent when a validator observes a valid block B in some round r,
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// In case the block is also committed, then IsCommit flag is set to true.
type NewValidBlock struct {
	Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
@@ -18,7 +18,7 @@ message NewRoundStep {
}

// NewValidBlock is sent when a validator observes a valid block B in some round r,
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
// In case the block is also committed, then IsCommit flag is set to true.
message NewValidBlock {
	int64 height = 1;
@@ -199,6 +199,58 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type ABCIResponsesInfo struct {
|
||||
AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"`
|
||||
Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} }
|
||||
func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ABCIResponsesInfo) ProtoMessage() {}
|
||||
func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{3}
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ABCIResponsesInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses {
|
||||
if m != nil {
|
||||
return m.AbciResponses
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) GetHeight() int64 {
|
||||
if m != nil {
|
||||
return m.Height
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Version struct {
|
||||
Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"`
|
||||
Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"`
|
||||
@@ -208,7 +260,7 @@ func (m *Version) Reset() { *m = Version{} }
|
||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||
func (*Version) ProtoMessage() {}
|
||||
func (*Version) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{3}
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{4}
|
||||
}
|
||||
func (m *Version) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -284,7 +336,7 @@ func (m *State) Reset() { *m = State{} }
|
||||
func (m *State) String() string { return proto.CompactTextString(m) }
|
||||
func (*State) ProtoMessage() {}
|
||||
func (*State) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{4}
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{5}
|
||||
}
|
||||
func (m *State) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -415,6 +467,7 @@ func init() {
|
||||
proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses")
|
||||
proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo")
|
||||
proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo")
|
||||
proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo")
|
||||
proto.RegisterType((*Version)(nil), "tendermint.state.Version")
|
||||
proto.RegisterType((*State)(nil), "tendermint.state.State")
|
||||
}
|
||||
@@ -422,55 +475,58 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) }
|
||||
|
||||
var fileDescriptor_ccfacf933f22bf93 = []byte{
|
||||
// 763 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34,
|
||||
0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03,
|
||||
0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff,
|
||||
0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10,
|
||||
0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c,
|
||||
0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08,
|
||||
0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d,
|
||||
0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84,
|
||||
0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9,
|
||||
0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70,
|
||||
0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43,
|
||||
0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0,
|
||||
0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5,
|
||||
0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80,
|
||||
0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd,
|
||||
0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb,
|
||||
0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba,
|
||||
0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89,
|
||||
0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6,
|
||||
0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e,
|
||||
0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f,
|
||||
0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c,
|
||||
0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69,
|
||||
0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41,
|
||||
0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8,
|
||||
0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7,
|
||||
0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6,
|
||||
0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71,
|
||||
0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07,
|
||||
0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89,
|
||||
0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc,
|
||||
0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56,
|
||||
0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9,
|
||||
0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9,
|
||||
0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35,
|
||||
0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57,
|
||||
0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83,
|
||||
0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd,
|
||||
0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f,
|
||||
0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72,
|
||||
0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8,
|
||||
0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07,
|
||||
0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab,
|
||||
0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e,
|
||||
0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4,
|
||||
0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00,
|
||||
0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00,
|
||||
// 805 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44,
|
||||
0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1,
|
||||
0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c,
|
||||
0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0,
|
||||
0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d,
|
||||
0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46,
|
||||
0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93,
|
||||
0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c,
|
||||
0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8,
|
||||
0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88,
|
||||
0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41,
|
||||
0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57,
|
||||
0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84,
|
||||
0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62,
|
||||
0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82,
|
||||
0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0,
|
||||
0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42,
|
||||
0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac,
|
||||
0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29,
|
||||
0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65,
|
||||
0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2,
|
||||
0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85,
|
||||
0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd,
|
||||
0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e,
|
||||
0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9,
|
||||
0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6,
|
||||
0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 0x21, 0x75, 0xa8, 0xfc,
|
||||
0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85,
|
||||
0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd,
|
||||
0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8,
|
||||
0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7,
|
||||
0xd0, 0x5a, 0x44, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2,
|
||||
0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b,
|
||||
0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9,
|
||||
0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0,
|
||||
0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30,
|
||||
0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f,
|
||||
0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b,
|
||||
0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12,
|
||||
0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8,
|
||||
0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3,
|
||||
0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2,
|
||||
0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1,
|
||||
0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42,
|
||||
0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93,
|
||||
0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a,
|
||||
0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf,
|
||||
0x5b, 0xdb, 0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57,
|
||||
0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba,
|
||||
0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9,
|
||||
0x2e, 0xa2, 0x07, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *ABCIResponses) Marshal() (dAtA []byte, err error) {
|
||||
@@ -612,6 +668,46 @@ func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Height != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.AbciResponses != nil {
|
||||
{
|
||||
size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Version) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@@ -747,12 +843,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i--
|
||||
dAtA[i] = 0x32
|
||||
}
|
||||
n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
|
||||
if err10 != nil {
|
||||
return 0, err10
|
||||
n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
|
||||
if err11 != nil {
|
||||
return 0, err11
|
||||
}
|
||||
i -= n10
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n10))
|
||||
i -= n11
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n11))
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
{
|
||||
@@ -854,6 +950,22 @@ func (m *ConsensusParamsInfo) Size() (n int) {
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.AbciResponses != nil {
|
||||
l = m.AbciResponses.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Height != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Height))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Version) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
@@ -1291,6 +1403,111 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.AbciResponses == nil {
|
||||
m.AbciResponses = &ABCIResponses{}
|
||||
}
|
||||
if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
|
||||
}
|
||||
m.Height = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Height |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Version) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
||||
@@ -32,6 +32,11 @@ message ConsensusParamsInfo {
  int64 last_height_changed = 2;
}

message ABCIResponsesInfo {
  ABCIResponses abci_responses = 1;
  int64 height = 2;
}
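The generated Go counterpart shown earlier pairs the responses with the height they belong to. A minimal sketch of wrapping responses before persisting them, assuming the generated package is imported as tmstate and that abciResponses and height come from block execution:

	info := tmstate.ABCIResponsesInfo{
		AbciResponses: abciResponses,
		Height:        height,
	}
	bz, err := info.Marshal() // gogoproto-generated marshalling added in this change
	if err != nil {
		panic(err)
	}
	_ = bz // e.g. persisted in the state store keyed by height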
message Version {
|
||||
tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false];
|
||||
string software = 2;
|
||||
|
||||
@@ -17,20 +17,20 @@ message Evidence {
|
||||
|
||||
// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes.
|
||||
message DuplicateVoteEvidence {
|
||||
tendermint.types.Vote vote_a = 1;
|
||||
tendermint.types.Vote vote_b = 2;
|
||||
int64 total_voting_power = 3;
|
||||
int64 validator_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
tendermint.types.Vote vote_a = 1;
|
||||
tendermint.types.Vote vote_b = 2;
|
||||
int64 total_voting_power = 3;
|
||||
int64 validator_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
}
|
||||
|
||||
// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
|
||||
message LightClientAttackEvidence {
|
||||
tendermint.types.LightBlock conflicting_block = 1;
|
||||
int64 common_height = 2;
|
||||
tendermint.types.LightBlock conflicting_block = 1;
|
||||
int64 common_height = 2;
|
||||
repeated tendermint.types.Validator byzantine_validators = 3;
|
||||
int64 total_voting_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
int64 total_voting_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
}
|
||||
|
||||
message EvidenceList {
|
||||
|
||||
@@ -106,10 +106,10 @@ message Vote {
|
||||
|
||||
// Commit contains the evidence that a block was committed by a set of validators.
|
||||
message Commit {
|
||||
int64 height = 1;
|
||||
int32 round = 2;
|
||||
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
|
||||
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
|
||||
int64 height = 1;
|
||||
int32 round = 2;
|
||||
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
|
||||
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// CommitSig is a part of the Vote included in a Commit.
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestAppConns_Failure(t *testing.T) {
|
||||
}()
|
||||
|
||||
quitCh := make(chan struct{})
|
||||
var recvQuitCh <-chan struct{} // nolint:gosimple
|
||||
var recvQuitCh <-chan struct{} //nolint:gosimple
|
||||
recvQuitCh = quitCh
|
||||
|
||||
clientCreatorMock := &mocks.ClientCreator{}
|
||||
|
||||
@@ -39,24 +39,24 @@ the example for more details.

Example:

	c, err := New("http://192.168.1.10:26657", "/websocket")
	if err != nil {
		// handle error
	}

	// call Start/Stop if you're subscribing to events
	err = c.Start()
	if err != nil {
		// handle error
	}
	defer c.Stop()

	res, err := c.Status()
	if err != nil {
		// handle error
	}

	// handle result
*/
type HTTP struct {
	remote string
@@ -47,7 +47,6 @@ var _ client.Client = Client{}
|
||||
|
||||
// Call is used by recorders to save a call and response.
|
||||
// It can also be used to configure mock responses.
|
||||
//
|
||||
type Call struct {
|
||||
Name string
|
||||
Args interface{}
|
||||
|
||||
@@ -81,7 +81,9 @@ func TestBlockResults(t *testing.T) {
|
||||
}
|
||||
|
||||
env = &Environment{}
|
||||
env.StateStore = sm.NewStore(dbm.NewMemDB())
|
||||
env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
err := env.StateStore.SaveABCIResponses(100, results)
|
||||
require.NoError(t, err)
|
||||
env.BlockStore = mockBlockStore{height: 100}
|
||||
|
||||
@@ -69,7 +69,6 @@ type peers interface {
|
||||
Peers() p2p.IPeerSet
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// Environment contains objects and interfaces used by the RPC. It is expected
|
||||
// to be setup once during startup.
|
||||
type Environment struct {
|
||||
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
// the remote server.
|
||||
//
|
||||
// WSClient is safe for concurrent use by multiple goroutines.
|
||||
type WSClient struct { // nolint: maligned
|
||||
type WSClient struct { //nolint: maligned
|
||||
conn *websocket.Conn
|
||||
|
||||
Address string // IP:PORT or /path/to/socket
|
||||
@@ -265,7 +265,7 @@ func (c *WSClient) dial() error {
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
rHeader := http.Header{}
|
||||
conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) // nolint:bodyclose
|
||||
conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) //nolint:bodyclose
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) {
|
||||
defer s.Close()
|
||||
|
||||
c := startClient(t, "//"+s.Listener.Addr().String())
|
||||
defer c.Stop() // nolint:errcheck // ignore for tests
|
||||
defer c.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
wg.Add(1)
|
||||
go callWgDoneOnResult(t, c, &wg)
|
||||
@@ -104,7 +104,7 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
|
||||
s := httptest.NewServer(h)
|
||||
|
||||
c := startClient(t, "//"+s.Listener.Addr().String())
|
||||
defer c.Stop() // nolint:errcheck // ignore for tests
|
||||
defer c.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
wg.Add(2)
|
||||
go callWgDoneOnResult(t, c, &wg)
|
||||
@@ -132,7 +132,7 @@ func TestWSClientReconnectFailure(t *testing.T) {
|
||||
s := httptest.NewServer(h)
|
||||
|
||||
c := startClient(t, "//"+s.Listener.Addr().String())
|
||||
defer c.Stop() // nolint:errcheck // ignore for tests
|
||||
defer c.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
go func() {
|
||||
for {
|
||||
@@ -181,7 +181,7 @@ func TestNotBlockingOnStop(t *testing.T) {
|
||||
timeout := 2 * time.Second
|
||||
s := httptest.NewServer(&myHandler{})
|
||||
c := startClient(t, "//"+s.Listener.Addr().String())
|
||||
c.Call(context.Background(), "a", make(map[string]interface{})) // nolint:errcheck // ignore for tests
|
||||
c.Call(context.Background(), "a", make(map[string]interface{})) //nolint:errcheck // ignore for tests
|
||||
// Let the readRoutine get around to blocking
|
||||
time.Sleep(time.Second)
|
||||
passCh := make(chan struct{})
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// HTTP RPC server supporting calls via uri params, jsonrpc over HTTP, and jsonrpc over
|
||||
// websockets
|
||||
//
|
||||
// Client Requests
|
||||
// # Client Requests
|
||||
//
|
||||
// Suppose we want to expose the rpc function `HelloWorld(name string, num int)`.
|
||||
//
|
||||
@@ -9,12 +9,12 @@
|
||||
//
|
||||
// As a GET request, it would have URI encoded parameters, and look like:
//
// curl 'http://localhost:8008/hello_world?name="my_world"&num=5'
// curl 'http://localhost:8008/hello_world?name="my_world"&num=5'
//
// Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`.
// This should also work:
//
// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5
// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5
//
// A GET request to `/` returns a list of available endpoints.
// For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be.
@@ -23,20 +23,19 @@
//
// As a POST request, we use JSONRPC. For instance, the same request would have this as the body:
//
// {
// "jsonrpc": "2.0",
// "id": "anything",
// "method": "hello_world",
// "params": {
// "name": "my_world",
// "num": 5
// }
// }
// {
// "jsonrpc": "2.0",
// "id": "anything",
// "method": "hello_world",
// "params": {
// "name": "my_world",
// "num": 5
// }
// }
//
// With the above saved in file `data.json`, we can make the request with
//
// curl --data @data.json http://localhost:8008
//
// curl --data @data.json http://localhost:8008
//
// WebSocket (JSONRPC)
//
@@ -44,42 +43,42 @@
// Websocket connections are available at their own endpoint, typically `/websocket`,
// though this is configurable when starting the server.
//
// Server Definition
// # Server Definition
//
// Define some types and routes:
//
// type ResultStatus struct {
// Value string
// }
// type ResultStatus struct {
// Value string
// }
//
// Define some routes
//
// var Routes = map[string]*rpcserver.RPCFunc{
// "status": rpcserver.NewRPCFunc(Status, "arg"),
// }
// var Routes = map[string]*rpcserver.RPCFunc{
// "status": rpcserver.NewRPCFunc(Status, "arg"),
// }
//
// An rpc function:
//
// func Status(v string) (*ResultStatus, error) {
// return &ResultStatus{v}, nil
// }
// func Status(v string) (*ResultStatus, error) {
// return &ResultStatus{v}, nil
// }
//
// Now start the server:
//
// mux := http.NewServeMux()
// rpcserver.RegisterRPCFuncs(mux, Routes)
// wm := rpcserver.NewWebsocketManager(Routes)
// mux.HandleFunc("/websocket", wm.WebsocketHandler)
// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{})
// if err != nil { panic(err) }
// go rpcserver.Serve(listener, mux, logger)
// mux := http.NewServeMux()
// rpcserver.RegisterRPCFuncs(mux, Routes)
// wm := rpcserver.NewWebsocketManager(Routes)
// mux.HandleFunc("/websocket", wm.WebsocketHandler)
// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{})
// if err != nil { panic(err) }
// go rpcserver.Serve(listener, mux, logger)
//
// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`)
// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`.
// Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets.
//
// Examples
// # Examples
//
// - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go)
package jsonrpc

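Note: the same `hello_world` call can also be made over the websocket endpoint mentioned above. A minimal client sketch, assuming the example server is listening on localhost:8008 with the websocket handler at `/websocket`; the github.com/gorilla/websocket dependency is used here purely for illustration and is not part of this change:

    package main

    import (
    	"fmt"

    	"github.com/gorilla/websocket" // assumed dependency, for illustration only
    )

    func main() {
    	// Dial the websocket endpoint exposed by the example server above.
    	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8008/websocket", nil)
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Close()

    	// Send the same JSON-RPC 2.0 body shown in the POST example.
    	req := map[string]interface{}{
    		"jsonrpc": "2.0",
    		"id":      "anything",
    		"method":  "hello_world",
    		"params":  map[string]interface{}{"name": "my_world", "num": 5},
    	}
    	if err := conn.WriteJSON(req); err != nil {
    		panic(err)
    	}

    	// Read and print the JSON-RPC response.
    	var resp map[string]interface{}
    	if err := conn.ReadJSON(&resp); err != nil {
    		panic(err)
    	}
    	fmt.Println(resp)
    }
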
@@ -176,8 +176,9 @@ func arrayParamsToArgs(
// array.
//
// Example:
// rpcFunc.args = [rpctypes.Context string]
// rpcFunc.argNames = ["arg"]
//
// rpcFunc.args = [rpctypes.Context string]
// rpcFunc.argNames = ["arg"]
func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) {
const argsOffset = 1

@@ -237,5 +238,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st
buf.WriteString("</body></html>")
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(200)
w.Write(buf.Bytes()) // nolint: errcheck
w.Write(buf.Bytes()) //nolint: errcheck
}

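Note: the JSON handler accepts `params` either as an object keyed by argument name (handled by jsonParamsToArgs) or as a positional array (handled by arrayParamsToArgs above). A sketch of the two equivalent request bodies for the `status` route from the package doc; the payload values are made up for illustration:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// Named parameters: keys must match the argNames given to NewRPCFunc ("arg").
    	named := map[string]interface{}{
    		"jsonrpc": "2.0",
    		"id":      "anything",
    		"method":  "status",
    		"params":  map[string]interface{}{"arg": "hello"},
    	}

    	// Positional parameters: values are matched to argNames by position.
    	positional := map[string]interface{}{
    		"jsonrpc": "2.0",
    		"id":      "anything",
    		"method":  "status",
    		"params":  []interface{}{"hello"},
    	}

    	for _, req := range []map[string]interface{}{named, positional} {
    		bz, _ := json.Marshal(req)
    		fmt.Println(string(bz))
    	}
    }
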
@@ -215,15 +215,17 @@ func (resp RPCResponse) String() string {
}

// From the JSON-RPC 2.0 spec:
//
// If there was an error in detecting the id in the Request object (e.g. Parse
// error/Invalid Request), it MUST be Null.
// error/Invalid Request), it MUST be Null.
func RPCParseError(err error) RPCResponse {
return NewRPCErrorResponse(nil, -32700, "Parse error. Invalid JSON", err.Error())
}

// From the JSON-RPC 2.0 spec:
//
// If there was an error in detecting the id in the Request object (e.g. Parse
// error/Invalid Request), it MUST be Null.
// error/Invalid Request), it MUST be Null.
func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse {
return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error())
}
@@ -276,9 +278,12 @@ type Context struct {
// RemoteAddr returns the remote address (usually a string "IP:port").
// If neither HTTPReq nor WSConn is set, an empty string is returned.
// HTTP:
// http.Request#RemoteAddr
//
// http.Request#RemoteAddr
//
// WS:
// result of GetRemoteAddr
//
// result of GetRemoteAddr
func (ctx *Context) RemoteAddr() string {
if ctx.HTTPReq != nil {
return ctx.HTTPReq.RemoteAddr
@@ -291,10 +296,13 @@ func (ctx *Context) RemoteAddr() string {
// Context returns the request's context.
// The returned context is always non-nil; it defaults to the background context.
// HTTP:
// The context is canceled when the client's connection closes, the request
// is canceled (with HTTP/2), or when the ServeHTTP method returns.
//
// The context is canceled when the client's connection closes, the request
// is canceled (with HTTP/2), or when the ServeHTTP method returns.
//
// WS:
// The context is canceled when the client's connections closes.
//
// The context is canceled when the client's connections closes.
func (ctx *Context) Context() context.Context {
if ctx.HTTPReq != nil {
return ctx.HTTPReq.Context()
@@ -307,7 +315,6 @@ func (ctx *Context) Context() context.Context {
//----------------------------------------
// SOCKETS

//
// Determine if its a unix or tcp socket.
// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port
// TODO: deprecate

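Note: a rough sketch of how the Context helpers above are consumed by an RPC function. The handler name and wiring are assumptions for illustration; only the `*rpctypes.Context` first-argument convention comes from the surrounding code:

    package rpc

    import (
    	"fmt"

    	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
    )

    // Hello is a sample RPC function. The first parameter is always the request
    // context supplied by the server; remaining parameters come from the client.
    func Hello(ctx *rpctypes.Context, name string) (string, error) {
    	// Abort early if the caller went away (HTTP request canceled or
    	// websocket connection closed).
    	select {
    	case <-ctx.Context().Done():
    		return "", ctx.Context().Err()
    	default:
    	}
    	return fmt.Sprintf("hello %s (from %s)", name, ctx.RemoteAddr()), nil
    }
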
@@ -1,6 +1,9 @@
package state

import "fmt"
import (
"errors"
"fmt"
)

type (
ErrInvalidBlock error
@@ -99,3 +102,5 @@ func (e ErrNoConsensusParamsForHeight) Error() string {
func (e ErrNoABCIResponsesForHeight) Error() string {
return fmt.Sprintf("could not find results for height #%d", e.Height)
}

var ErrABCIResponsesNotPersisted = errors.New("node is not persisting abci responses")

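Note: callers that query block results can distinguish the new sentinel error from an ordinary miss. A minimal sketch, assuming only the Store interface and errors shown in this diff; the helper function itself is illustrative:

    package example

    import (
    	"errors"
    	"fmt"

    	sm "github.com/tendermint/tendermint/state"
    )

    // reportResults shows how a caller might react to the new sentinel error.
    func reportResults(store sm.Store, height int64) {
    	resp, err := store.LoadABCIResponses(height)
    	switch {
    	case errors.Is(err, sm.ErrABCIResponsesNotPersisted):
    		// The node is discarding ABCI responses; historical results
    		// are simply not kept.
    		fmt.Println("results pruned on this node")
    	case err != nil:
    		// Includes the not-found case for unknown heights.
    		fmt.Println("no results:", err)
    	default:
    		fmt.Println("deliver txs:", len(resp.DeliverTxs))
    	}
    }
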
@@ -41,7 +41,9 @@ func TestApplyBlock(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mmock.Mempool{}, sm.EmptyEvidencePool{})
@@ -67,7 +69,9 @@ func TestBeginBlockValidators(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // no need to check error again

state, stateDB, _ := makeState(2, 2)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
@@ -130,7 +134,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests

state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
privVal := privVals[state.Validators.Validators[0].Address.String()]
@@ -354,7 +360,9 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

blockExec := sm.NewBlockExecutor(
stateStore,
@@ -425,7 +433,9 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
defer proxyApp.Stop() //nolint:errcheck // ignore for tests

state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),

@@ -43,6 +43,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto
// SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in
// store.go, exported exclusively and explicitly for testing.
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error {
stateStore := dbStore{db}
stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}}
return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet)
}

@@ -115,7 +115,9 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida
})

stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
if err := stateStore.Save(s); err != nil {
panic(err)
}

@@ -153,6 +153,29 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) {
return r0, r1
}

// LoadLastABCIResponse provides a mock function with given fields: _a0
func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses, error) {
ret := _m.Called(_a0)

var r0 *tendermintstate.ABCIResponses
if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*tendermintstate.ABCIResponses)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// LoadValidators provides a mock function with given fields: _a0
func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) {
ret := _m.Called(_a0)

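Note: the generated mock can be programmed with testify's expectation API in tests. A hedged sketch; the expected height and return values are made up for illustration:

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/require"

    	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
    	"github.com/tendermint/tendermint/state/mocks"
    )

    func TestMockLoadLastABCIResponse(t *testing.T) {
    	store := &mocks.Store{}
    	// Program the mock: a call with height 5 returns an empty response set.
    	store.On("LoadLastABCIResponse", int64(5)).Return(&tmstate.ABCIResponses{}, nil)

    	resp, err := store.LoadLastABCIResponse(5)
    	require.NoError(t, err)
    	require.NotNil(t, resp)
    	store.AssertExpectations(t)
    }
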
@@ -82,7 +82,10 @@ func TestRollback(t *testing.T) {
}

func TestRollbackNoState(t *testing.T) {
stateStore := state.NewStore(dbm.NewMemDB())
stateStore := state.NewStore(dbm.NewMemDB(),
state.StoreOptions{
DiscardABCIResponses: false,
})
blockStore := &mocks.BlockStore{}

_, _, err := state.Rollback(blockStore, stateStore)
@@ -115,7 +118,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) {
}

func setupStateStore(t *testing.T, height int64) state.Store {
stateStore := state.NewStore(dbm.NewMemDB())
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false})
valSet, _ := types.RandValidatorSet(5, 10)

params := types.DefaultConsensusParams()

@@ -17,7 +17,7 @@ import (
"github.com/tendermint/tendermint/version"
)

// database keys
// database key
var (
stateKey = []byte("stateKey")
)

@@ -29,7 +29,9 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
config := cfg.ResetTestRoot("state_")
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
@@ -76,7 +78,9 @@ func TestMakeGenesisStateNilValidators(t *testing.T) {
func TestStateSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
assert := assert.New(t)

state.LastBlockHeight++
@@ -95,7 +99,9 @@ func TestStateSaveLoad(t *testing.T) {
func TestABCIResponsesSaveLoad1(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
assert := assert.New(t)

state.LastBlockHeight++
@@ -128,7 +134,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
defer tearDown(t)
assert := assert.New(t)

stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

cases := [...]struct {
// Height is implied to equal index+2,
@@ -216,7 +224,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
defer tearDown(t)
assert := assert.New(t)

statestore := sm.NewStore(stateDB)
statestore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

// Can't load anything for height 0.
_, err := statestore.LoadValidators(0)
@@ -249,7 +259,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
func TestOneValidatorChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

// Change vals at these heights.
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
@@ -901,7 +913,9 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
const valSetSize = 2
tearDown, stateDB, state := setupTestCase(t)
t.Cleanup(func() { tearDown(t) })
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
err := stateStore.Save(state)
@@ -926,7 +940,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
const valSetSize = 7
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
@@ -990,7 +1006,9 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)

stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

// Change vals at these heights.
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}

@@ -39,6 +39,10 @@ func calcABCIResponsesKey(height int64) []byte {

//----------------------

var (
lastABCIResponseKey = []byte("lastABCIResponseKey")
)

//go:generate ../scripts/mockery_generate.sh Store

// Store defines the state store interface
@@ -58,6 +62,8 @@ type Store interface {
LoadValidators(int64) (*types.ValidatorSet, error)
// LoadABCIResponses loads the abciResponse for a given height
LoadABCIResponses(int64) (*tmstate.ABCIResponses, error)
// LoadLastABCIResponse loads the last abciResponse for a given height
LoadLastABCIResponse(int64) (*tmstate.ABCIResponses, error)
// LoadConsensusParams loads the consensus params for a given height
LoadConsensusParams(int64) (tmproto.ConsensusParams, error)
// Save overwrites the previous state with the updated one
@@ -75,13 +81,24 @@ type Store interface {
// dbStore wraps a db (github.com/tendermint/tm-db)
type dbStore struct {
db dbm.DB

StoreOptions
}

type StoreOptions struct {

// DiscardABCIResponses determines whether or not the store
// retains all ABCIResponses. If DiscardABCIResponses is enabled,
// the store will maintain only the response object from the latest
// height.
DiscardABCIResponses bool
}

var _ Store = (*dbStore)(nil)

// NewStore creates the dbStore of the state pkg.
func NewStore(db dbm.DB) Store {
return dbStore{db}
func NewStore(db dbm.DB, options StoreOptions) Store {
return dbStore{db, options}
}

// LoadStateFromDBOrGenesisFile loads the most recent state from the database,
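Note: a sketch of how a caller would construct the store with the new options. The boolean would typically come from node configuration, which this hunk does not define; only NewStore and StoreOptions come from the diff:

    package example

    import (
    	dbm "github.com/tendermint/tm-db"

    	sm "github.com/tendermint/tendermint/state"
    )

    // newStateStore wires the new StoreOptions through from a boolean that
    // would typically come from the node's configuration.
    func newStateStore(db dbm.DB, discardABCIResponses bool) sm.Store {
    	return sm.NewStore(db, sm.StoreOptions{
    		// When true, only the latest ABCI responses are kept (see
    		// LoadLastABCIResponse); historical responses are dropped.
    		DiscardABCIResponses: discardABCIResponses,
    	})
    }

    // Example usage with an in-memory database:
    //
    //	store := newStateStore(dbm.NewMemDB(), false)
    //	_ = store
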
@@ -358,12 +375,13 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
}

// LoadABCIResponses loads the ABCIResponses for the given height from the
// database. If not found, ErrNoABCIResponsesForHeight is returned.
//
// This is useful for recovering from crashes where we called app.Commit and
// before we called s.Save(). It can also be used to produce Merkle proofs of
// the result of txs.
// database. If the node has DiscardABCIResponses set to true, ErrABCIResponsesNotPersisted
// is returned. If not found, ErrNoABCIResponsesForHeight is returned.
func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) {
if store.DiscardABCIResponses {
return nil, ErrABCIResponsesNotPersisted
}

buf, err := store.db.Get(calcABCIResponsesKey(height))
if err != nil {
return nil, err
@@ -385,12 +403,43 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er
return abciResponses, nil
}

// LoadLastABCIResponse loads the ABCIResponses from the most recent height.
// The height parameter is used to ensure that the response corresponds to the latest height.
// If not, an error is returned.
//
// This method is used for recovering in the case that we called the Commit ABCI
// method on the application but crashed before persisting the results.
func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses, error) {
bz, err := store.db.Get(lastABCIResponseKey)
if err != nil {
return nil, err
}

if len(bz) == 0 {
return nil, errors.New("no last ABCI response has been persisted")
}

abciResponse := new(tmstate.ABCIResponsesInfo)
err = abciResponse.Unmarshal(bz)
if err != nil {
tmos.Exit(fmt.Sprintf(`LoadLastABCIResponses: Data has been corrupted or its spec has
changed: %v\n`, err))
}

// Here we validate the result by comparing its height to the expected height.
if height != abciResponse.GetHeight() {
return nil, fmt.Errorf("expected height %d but last stored abci responses was at height %d",
height, abciResponse.GetHeight())
}

return abciResponse.AbciResponses, nil
}

// SaveABCIResponses persists the ABCIResponses to the database.
// This is useful in case we crash after app.Commit and before s.Save().
// Responses are indexed by height so they can also be loaded later to produce
// Merkle proofs.
//
// Exposed for testing.
// CONTRACT: height must be monotonically increasing every time this is called.
func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error {
var dtxs []*abci.ResponseDeliverTx
// strip nil values,
@@ -401,17 +450,30 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
}
abciResponses.DeliverTxs = dtxs

bz, err := abciResponses.Marshal()
// If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults
// query or to reindex an event using the command line.
if !store.DiscardABCIResponses {
bz, err := abciResponses.Marshal()
if err != nil {
return err
}
if err := store.db.Set(calcABCIResponsesKey(height), bz); err != nil {
return err
}
}

// We always save the last ABCI response for crash recovery.
// This overwrites the previous saved ABCI Response.
response := &tmstate.ABCIResponsesInfo{
AbciResponses: abciResponses,
Height: height,
}
bz, err := response.Marshal()
if err != nil {
return err
}

err = store.db.SetSync(calcABCIResponsesKey(height), bz)
if err != nil {
return err
}

return nil
return store.db.SetSync(lastABCIResponseKey, bz)
}

//-----------------------------------------------------------------------------
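Note: to make the save/recover flow above concrete, a hedged sketch of the two persistence paths. The scenario and helper are illustrative; the Store methods, types, and import layout are taken from this diff and the surrounding repo:

    package example

    import (
    	"fmt"

    	abci "github.com/tendermint/tendermint/abci/types"
    	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
    	sm "github.com/tendermint/tendermint/state"
    )

    // persistAndRecover saves the responses for a height and then reads back
    // the "last" copy, which is what crash recovery after app.Commit relies on.
    func persistAndRecover(store sm.Store, height int64) error {
    	resp := &tmstate.ABCIResponses{
    		BeginBlock: &abci.ResponseBeginBlock{},
    		EndBlock:   &abci.ResponseEndBlock{},
    	}

    	// Always writes the last-response record; also writes the per-height
    	// record unless DiscardABCIResponses is set on the store.
    	if err := store.SaveABCIResponses(height, resp); err != nil {
    		return err
    	}

    	// The height must match the height that was just saved, otherwise an
    	// error is returned.
    	last, err := store.LoadLastABCIResponse(height)
    	if err != nil {
    		return err
    	}
    	fmt.Println("recovered responses with", len(last.DeliverTxs), "txs")
    	return nil
    }
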
@@ -471,7 +533,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
}

if len(buf) == 0 {
return nil, errors.New("value retrieved from db is empty")
return nil, errors.New("no last ABCI response has been persisted")
}

v := new(tmstate.ValidatorsInfo)
@@ -479,7 +541,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error
if err != nil {
// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed:
%v\n`, err))
%v\n`, err))
}
// TODO: ensure that buf is completely read.

@@ -557,7 +619,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa
return nil, err
}
if len(buf) == 0 {
return nil, errors.New("value retrieved from db is empty")
return nil, errors.New("no last ABCI response has been persisted")
}

paramsInfo := new(tmstate.ConsensusParamsInfo)

@@ -23,7 +23,9 @@ import (

func TestStoreLoadValidators(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})

@@ -54,7 +56,9 @@ func BenchmarkLoadValidators(b *testing.B) {
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
require.NoError(b, err)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
b.Fatal(err)
@@ -107,7 +111,9 @@ func TestPruneStates(t *testing.T) {
tc := tc
t.Run(name, func(t *testing.T) {
db := dbm.NewMemDB()
stateStore := sm.NewStore(db)
stateStore := sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
pk := ed25519.GenPrivKey().PubKey()

// Generate a bunch of state data. Validators change for heights ending with 3, and
@@ -229,3 +235,72 @@ func sliceToMap(s []int64) map[int64]bool {
}
return m
}

func TestLastABCIResponses(t *testing.T) {
// create an empty state store.
t.Run("Not persisting responses", func(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
responses, err := stateStore.LoadABCIResponses(1)
require.Error(t, err)
require.Nil(t, responses)
// stub the abciresponses.
response1 := &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{},
DeliverTxs: []*abci.ResponseDeliverTx{
{Code: 32, Data: []byte("Hello"), Log: "Huh?"},
},
EndBlock: &abci.ResponseEndBlock{},
}
// create new db and state store and set discard abciresponses to false.
stateDB = dbm.NewMemDB()
stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false})
height := int64(10)
// save the last abci response.
err = stateStore.SaveABCIResponses(height, response1)
require.NoError(t, err)
// load the last abciresponse and check that it was saved.
lastResponse, err := stateStore.LoadLastABCIResponse(height)
require.NoError(t, err)
// check that the saved response is the same as the loaded one.
assert.Equal(t, lastResponse, response1)
// use an incorrect height to make sure the state store errors.
_, err = stateStore.LoadLastABCIResponse(height + 1)
assert.Error(t, err)
// the per-height record should also have been saved, since DiscardABCIResponses is false.
responses, err = stateStore.LoadABCIResponses(height)
require.NoError(t, err, responses)
require.Equal(t, response1, responses)
})

t.Run("persisting responses", func(t *testing.T) {
stateDB := dbm.NewMemDB()
height := int64(10)
// stub the second abciresponse.
response2 := &tmstate.ABCIResponses{
BeginBlock: &abci.ResponseBeginBlock{},
DeliverTxs: []*abci.ResponseDeliverTx{
{Code: 44, Data: []byte("Hello again"), Log: "????"},
},
EndBlock: &abci.ResponseEndBlock{},
}
// create a new state store that discards per-height responses.
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: true,
})
// save an additional response.
err := stateStore.SaveABCIResponses(height+1, response2)
require.NoError(t, err)
// check that the response was saved by loading the last response.
lastResponse2, err := stateStore.LoadLastABCIResponse(height + 1)
require.NoError(t, err)
// check that the saved response is the same as the loaded one.
assert.Equal(t, response2, lastResponse2)
// should error as we are no longer saving the response.
_, err = stateStore.LoadABCIResponses(height + 1)
assert.Equal(t, sm.ErrABCIResponsesNotPersisted, err)
})

}

@@ -33,7 +33,9 @@ func TestTxFilter(t *testing.T) {
for i, tc := range testCases {
stateDB, err := dbm.NewDB("state", "memdb", os.TempDir())
require.NoError(t, err)
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)

Some files were not shown because too many files have changed in this diff.