mirror of
https://github.com/tendermint/tendermint.git
synced 2026-01-13 16:22:53 +00:00
Compare commits
14 Commits
rpc-header
...
removing-f
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5c624eae9d | ||
|
|
849fd79df8 | ||
|
|
3f34cafd33 | ||
|
|
5e31b29e32 | ||
|
|
ecc2b7baca | ||
|
|
dab1abe078 | ||
|
|
4be78e3125 | ||
|
|
1069ffc6aa | ||
|
|
3003e05581 | ||
|
|
498657f128 | ||
|
|
c322b89b2a | ||
|
|
cbc7a1abcf | ||
|
|
f36999e484 | ||
|
|
ae1fc74f80 |
@@ -2,7 +2,7 @@ linters:
|
||||
enable:
|
||||
- asciicheck
|
||||
- bodyclose
|
||||
- deadcode
|
||||
# - deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- dupl
|
||||
@@ -18,15 +18,17 @@ linters:
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nakedret
|
||||
- nolintlint
|
||||
# - nolintlint
|
||||
- prealloc
|
||||
- staticcheck
|
||||
# - structcheck // to be fixed by golangci-lint
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
disable:
|
||||
- unused
|
||||
- varcheck
|
||||
- deadcode
|
||||
- nolintlint
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
@@ -40,7 +42,5 @@ linters-settings:
|
||||
max-blank-identifiers: 3
|
||||
golint:
|
||||
min-confidence: 0
|
||||
maligned:
|
||||
suggest-new: true
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
@@ -10,6 +10,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [config] \#9259 Rename the fastsync section and the fast_sync key blocksync and block_sync respectively
|
||||
|
||||
- Apps
|
||||
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
@@ -29,6 +31,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [config] \#9054 Flag added to overwrite abciresponses.
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
[docker] \#9073 enable cross platform build using docker buildx
|
||||
|
||||
@@ -17,14 +17,6 @@ NOTE: v0.35 was recalled and v0.36 was skipped
|
||||
values, which was inconsistent with the `uint64` varint length delimiters used
|
||||
in the P2P layer. Both now consistently use `uint64` varint length delimiters.
|
||||
|
||||
|
||||
## v0.37 (Unreleased)
|
||||
|
||||
This version requires a coordinated network upgrade. It alters the elements in the predigest of the `LastResultsHash` and thus
|
||||
all nodes must upgrade together (see #9175).
|
||||
|
||||
NOTE: v0.35 was recalled and v0.36 was skipped
|
||||
|
||||
## v0.34.20
|
||||
|
||||
### Feature: Priority Mempool
|
||||
|
||||
@@ -162,7 +162,7 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
|
||||
@@ -2,9 +2,8 @@
|
||||
Package server is used to start a new ABCI server.
|
||||
|
||||
It contains two server implementation:
|
||||
* gRPC server
|
||||
* socket server
|
||||
|
||||
- gRPC server
|
||||
- socket server
|
||||
*/
|
||||
package server
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
@@ -9,7 +9,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/blocksync"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -28,7 +29,7 @@ func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
request := bcproto.BlockRequest{Height: tc.requestHeight}
|
||||
assert.Equal(t, tc.expectErr, ValidateMsg(&request) != nil, "Validate Basic had an unexpected result")
|
||||
assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&request) != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -48,14 +49,14 @@ func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
nonResponse := bcproto.NoBlockResponse{Height: tc.nonResponseHeight}
|
||||
assert.Equal(t, tc.expectErr, ValidateMsg(&nonResponse) != nil, "Validate Basic had an unexpected result")
|
||||
assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&nonResponse) != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
|
||||
request := bcproto.StatusRequest{}
|
||||
assert.NoError(t, ValidateMsg(&request))
|
||||
assert.NoError(t, blocksync.ValidateMsg(&request))
|
||||
}
|
||||
|
||||
func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
|
||||
@@ -73,12 +74,12 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
response := bcproto.StatusResponse{Height: tc.responseHeight}
|
||||
assert.Equal(t, tc.expectErr, ValidateMsg(&response) != nil, "Validate Basic had an unexpected result")
|
||||
assert.Equal(t, tc.expectErr, blocksync.ValidateMsg(&response) != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll // ignore line length in tests
|
||||
//nolint:lll // ignore line length in tests
|
||||
func TestBlockchainMessageVectors(t *testing.T) {
|
||||
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
|
||||
block.Version.Block = 11 // overwrite updated protocol version
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -58,7 +58,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
|
||||
are not at peer limits, we can probably switch to consensus reactor
|
||||
*/
|
||||
|
||||
// BlockPool keeps track of the fast sync peers, block requests and block responses.
|
||||
// BlockPool keeps track of the block sync peers, block requests and block responses.
|
||||
type BlockPool struct {
|
||||
service.BaseService
|
||||
startTime time.Time
|
||||
@@ -410,7 +410,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
// for debugging purposes
|
||||
//nolint:unused
|
||||
|
||||
func (pool *BlockPool) debug() string {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -7,15 +7,15 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
// BlocksyncChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlocksyncChannel = byte(0x40)
|
||||
|
||||
trySyncIntervalMS = 10
|
||||
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// for when we switch from blockchain reactor and block sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
@@ -54,7 +54,7 @@ type Reactor struct {
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
fastSync bool
|
||||
blockSync bool
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
@@ -62,7 +62,7 @@ type Reactor struct {
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *Reactor {
|
||||
blockSync bool) *Reactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
@@ -85,7 +85,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
pool: pool,
|
||||
fastSync: fastSync,
|
||||
blockSync: blockSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
}
|
||||
@@ -101,7 +101,7 @@ func (bcR *Reactor) SetLogger(l log.Logger) {
|
||||
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *Reactor) OnStart() error {
|
||||
if bcR.fastSync {
|
||||
if bcR.blockSync {
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -111,9 +111,9 @@ func (bcR *Reactor) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (bcR *Reactor) SwitchToFastSync(state sm.State) error {
|
||||
bcR.fastSync = true
|
||||
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
|
||||
func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
|
||||
bcR.blockSync = true
|
||||
bcR.initialState = state
|
||||
|
||||
bcR.pool.height = state.LastBlockHeight + 1
|
||||
@@ -127,7 +127,7 @@ func (bcR *Reactor) SwitchToFastSync(state sm.State) error {
|
||||
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *Reactor) OnStop() {
|
||||
if bcR.fastSync {
|
||||
if bcR.blockSync {
|
||||
if err := bcR.pool.Stop(); err != nil {
|
||||
bcR.Logger.Error("Error stopping pool", "err", err)
|
||||
}
|
||||
@@ -138,7 +138,7 @@ func (bcR *Reactor) OnStop() {
|
||||
func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
ID: BlocksyncChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
@@ -157,7 +157,7 @@ func (bcR *Reactor) AddPeer(peer p2p.Peer) {
|
||||
return
|
||||
}
|
||||
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
peer.Send(BlocksyncChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -188,7 +188,7 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return src.TrySend(BlocksyncChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
@@ -199,7 +199,7 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return src.TrySend(BlocksyncChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
@@ -239,7 +239,7 @@ func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
bcR.Logger.Error("could not convert msg to protobut", "err", err)
|
||||
return
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
src.TrySend(BlocksyncChannel, msgBytes)
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
@@ -291,7 +291,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
queued := peer.TrySend(BlocksyncChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
@@ -303,7 +303,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
go bcR.BroadcastStatusRequest() //nolint: errcheck
|
||||
|
||||
}
|
||||
}
|
||||
@@ -409,7 +409,7 @@ FOR_LOOP:
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
bcR.Logger.Info("Block Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
@@ -430,7 +430,7 @@ func (bcR *Reactor) BroadcastStatusRequest() error {
|
||||
return fmt.Errorf("could not convert msg to proto: %w", err)
|
||||
}
|
||||
|
||||
bcR.Switch.Broadcast(BlockchainChannel, bm)
|
||||
bcR.Switch.Broadcast(BlocksyncChannel, bm)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -70,7 +70,9 @@ func newReactor(
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
@@ -83,7 +85,9 @@ func newReactor(
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
@@ -104,7 +104,7 @@ func killProc(pid uint64, dir string) error {
|
||||
// pipe STDERR output from tailing the Tendermint process to a file
|
||||
//
|
||||
// NOTE: This will only work on UNIX systems.
|
||||
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec
|
||||
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec
|
||||
|
||||
outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
|
||||
if err != nil {
|
||||
|
||||
@@ -67,7 +67,7 @@ func copyConfig(home, dir string) error {
|
||||
func dumpProfile(dir, addr, profile string, debug int) error {
|
||||
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
|
||||
|
||||
resp, err := http.Get(endpoint) // nolint: gosec
|
||||
resp, err := http.Get(endpoint) //nolint: gosec
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
|
||||
}
|
||||
|
||||
@@ -40,6 +40,9 @@ replace the backend. The default start-height is 0, meaning the tooling will sta
|
||||
reindex from the base block height(inclusive); and the default end-height is 0, meaning
|
||||
the tooling will reindex until the latest block height(inclusive). User can omit
|
||||
either or both arguments.
|
||||
|
||||
Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
|
||||
want to use this command.
|
||||
`,
|
||||
Example: `
|
||||
tendermint reindex-event
|
||||
|
||||
@@ -77,7 +77,9 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
stateStore := state.NewStore(stateDB)
|
||||
stateStore := state.NewStore(stateDB, state.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
})
|
||||
|
||||
return blockStore, stateStore, nil
|
||||
}
|
||||
|
||||
@@ -53,6 +53,11 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
|
||||
if err := conf.ValidateBasic(); err != nil {
|
||||
return nil, fmt.Errorf("error in config file: %v", err)
|
||||
}
|
||||
if warnings := conf.CheckDeprecated(); len(warnings) > 0 {
|
||||
for _, warning := range warnings {
|
||||
logger.Info("deprecated usage found in configuration file", "usage", warning)
|
||||
}
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"socket address to listen on for connections from external priv_validator process")
|
||||
|
||||
// node flags
|
||||
cmd.Flags().Bool("fast_sync", config.FastSyncMode, "fast blockchain syncing")
|
||||
cmd.Flags().Bool("block_sync", config.BlockSyncMode, "sync the block chain using the blocksync algorithm")
|
||||
cmd.Flags().BytesHexVar(
|
||||
&genesisHash,
|
||||
"genesis_hash",
|
||||
|
||||
112
config/config.go
112
config/config.go
@@ -68,14 +68,18 @@ type Config struct {
|
||||
BaseConfig `mapstructure:",squash"`
|
||||
|
||||
// Options for services
|
||||
RPC *RPCConfig `mapstructure:"rpc"`
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
FastSync *FastSyncConfig `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
RPC *RPCConfig `mapstructure:"rpc"`
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
|
||||
//TODO(williambanfield): remove this field once v0.37 is released.
|
||||
// https://github.com/tendermint/tendermint/issues/9279
|
||||
DeprecatedFastSyncConfig map[interface{}]interface{} `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
Storage *StorageConfig `mapstructure:"storage"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for a Tendermint node
|
||||
@@ -86,8 +90,9 @@ func DefaultConfig() *Config {
|
||||
P2P: DefaultP2PConfig(),
|
||||
Mempool: DefaultMempoolConfig(),
|
||||
StateSync: DefaultStateSyncConfig(),
|
||||
FastSync: DefaultFastSyncConfig(),
|
||||
BlockSync: DefaultBlockSyncConfig(),
|
||||
Consensus: DefaultConsensusConfig(),
|
||||
Storage: DefaultStorageConfig(),
|
||||
TxIndex: DefaultTxIndexConfig(),
|
||||
Instrumentation: DefaultInstrumentationConfig(),
|
||||
}
|
||||
@@ -101,8 +106,9 @@ func TestConfig() *Config {
|
||||
P2P: TestP2PConfig(),
|
||||
Mempool: TestMempoolConfig(),
|
||||
StateSync: TestStateSyncConfig(),
|
||||
FastSync: TestFastSyncConfig(),
|
||||
BlockSync: TestBlockSyncConfig(),
|
||||
Consensus: TestConsensusConfig(),
|
||||
Storage: TestStorageConfig(),
|
||||
TxIndex: TestTxIndexConfig(),
|
||||
Instrumentation: TestInstrumentationConfig(),
|
||||
}
|
||||
@@ -136,8 +142,8 @@ func (cfg *Config) ValidateBasic() error {
|
||||
if err := cfg.StateSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [statesync] section: %w", err)
|
||||
}
|
||||
if err := cfg.FastSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [fastsync] section: %w", err)
|
||||
if err := cfg.BlockSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [blocksync] section: %w", err)
|
||||
}
|
||||
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [consensus] section: %w", err)
|
||||
@@ -148,6 +154,17 @@ func (cfg *Config) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) CheckDeprecated() []string {
|
||||
var warnings []string
|
||||
if cfg.DeprecatedFastSyncConfig != nil {
|
||||
warnings = append(warnings, "[fastsync] table detected. This section has been renamed to [blocksync]. The values in this deprecated section will be disregarded.")
|
||||
}
|
||||
if cfg.BaseConfig.DeprecatedFastSyncMode != nil {
|
||||
warnings = append(warnings, "fast_sync key detected. This key has been renamed to block_sync. The value of this deprecated key will be disregarded.")
|
||||
}
|
||||
return warnings
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// BaseConfig
|
||||
|
||||
@@ -167,10 +184,14 @@ type BaseConfig struct { //nolint: maligned
|
||||
// A custom human readable name for this node
|
||||
Moniker string `mapstructure:"moniker"`
|
||||
|
||||
// If this node is many blocks behind the tip of the chain, FastSync
|
||||
// If this node is many blocks behind the tip of the chain, Blocksync
|
||||
// allows them to catchup quickly by downloading blocks in parallel
|
||||
// and verifying their commits
|
||||
FastSyncMode bool `mapstructure:"fast_sync"`
|
||||
BlockSyncMode bool `mapstructure:"block_sync"`
|
||||
|
||||
//TODO(williambanfield): remove this field once v0.37 is released.
|
||||
// https://github.com/tendermint/tendermint/issues/9279
|
||||
DeprecatedFastSyncMode interface{} `mapstructure:"fast_sync"`
|
||||
|
||||
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
|
||||
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
@@ -238,7 +259,7 @@ func DefaultBaseConfig() BaseConfig {
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultLogLevel,
|
||||
LogFormat: LogFormatPlain,
|
||||
FastSyncMode: true,
|
||||
BlockSyncMode: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "goleveldb",
|
||||
DBPath: "data",
|
||||
@@ -250,7 +271,7 @@ func TestBaseConfig() BaseConfig {
|
||||
cfg := DefaultBaseConfig()
|
||||
cfg.chainID = "tendermint_test"
|
||||
cfg.ProxyApp = "kvstore"
|
||||
cfg.FastSyncMode = false
|
||||
cfg.BlockSyncMode = false
|
||||
cfg.DBBackend = "memdb"
|
||||
return cfg
|
||||
}
|
||||
@@ -817,7 +838,7 @@ func DefaultStateSyncConfig() *StateSyncConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// TestFastSyncConfig returns a default configuration for the state sync service
|
||||
// TestStateSyncConfig returns a default configuration for the state sync service
|
||||
func TestStateSyncConfig() *StateSyncConfig {
|
||||
return DefaultStateSyncConfig()
|
||||
}
|
||||
@@ -873,34 +894,34 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// FastSyncConfig
|
||||
// BlockSyncConfig
|
||||
|
||||
// FastSyncConfig defines the configuration for the Tendermint fast sync service
|
||||
type FastSyncConfig struct {
|
||||
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
|
||||
type BlockSyncConfig struct {
|
||||
Version string `mapstructure:"version"`
|
||||
}
|
||||
|
||||
// DefaultFastSyncConfig returns a default configuration for the fast sync service
|
||||
func DefaultFastSyncConfig() *FastSyncConfig {
|
||||
return &FastSyncConfig{
|
||||
// DefaultBlockSyncConfig returns a default configuration for the block sync service
|
||||
func DefaultBlockSyncConfig() *BlockSyncConfig {
|
||||
return &BlockSyncConfig{
|
||||
Version: "v0",
|
||||
}
|
||||
}
|
||||
|
||||
// TestFastSyncConfig returns a default configuration for the fast sync.
|
||||
func TestFastSyncConfig() *FastSyncConfig {
|
||||
return DefaultFastSyncConfig()
|
||||
// TestBlockSyncConfig returns a default configuration for the block sync.
|
||||
func TestBlockSyncConfig() *BlockSyncConfig {
|
||||
return DefaultBlockSyncConfig()
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
func (cfg *FastSyncConfig) ValidateBasic() error {
|
||||
func (cfg *BlockSyncConfig) ValidateBasic() error {
|
||||
switch cfg.Version {
|
||||
case "v0":
|
||||
return nil
|
||||
case "v1", "v2":
|
||||
return fmt.Errorf("fast sync version %s has been deprecated. Please use v0 instead", cfg.Version)
|
||||
return fmt.Errorf("blocksync version %s has been deprecated. Please use v0 instead", cfg.Version)
|
||||
default:
|
||||
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
|
||||
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1069,11 +1090,40 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// StorageConfig
|
||||
|
||||
// StorageConfig allows more fine-grained control over certain storage-related
|
||||
// behavior.
|
||||
type StorageConfig struct {
|
||||
// Set to false to ensure ABCI responses are persisted. ABCI responses are
|
||||
// required for `/block_results` RPC queries, and to reindex events in the
|
||||
// command-line tool.
|
||||
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
|
||||
}
|
||||
|
||||
// DefaultStorageConfig returns the default configuration options relating to
|
||||
// Tendermint storage optimization.
|
||||
func DefaultStorageConfig() *StorageConfig {
|
||||
return &StorageConfig{
|
||||
DiscardABCIResponses: false,
|
||||
}
|
||||
}
|
||||
|
||||
// TestStorageConfig returns storage configuration that can be used for
|
||||
// testing.
|
||||
func TestStorageConfig() *StorageConfig {
|
||||
return &StorageConfig{
|
||||
DiscardABCIResponses: false,
|
||||
}
|
||||
}
|
||||
|
||||
// TxIndexConfig
|
||||
// Remember that Event has the following structure:
|
||||
// type: [
|
||||
// key: value,
|
||||
// ...
|
||||
// ...
|
||||
// key: value,
|
||||
// ...
|
||||
//
|
||||
// ]
|
||||
//
|
||||
// CompositeKeys are constructed by `type.key`
|
||||
|
||||
@@ -128,8 +128,8 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
|
||||
require.NoError(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
func TestFastSyncConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestFastSyncConfig()
|
||||
func TestBlockSyncConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestBlockSyncConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
// tamper with version
|
||||
@@ -140,8 +140,8 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
// nolint: lll
|
||||
func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
// nolint: lll
|
||||
testcases := map[string]struct {
|
||||
modify func(*ConsensusConfig)
|
||||
expectErr bool
|
||||
@@ -166,6 +166,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
}
|
||||
|
||||
for desc, tc := range testcases {
|
||||
tc := tc // appease linter
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
|
||||
@@ -87,10 +87,10 @@ proxy_app = "{{ .BaseConfig.ProxyApp }}"
|
||||
# A custom human readable name for this node
|
||||
moniker = "{{ .BaseConfig.Moniker }}"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = {{ .BaseConfig.FastSyncMode }}
|
||||
block_sync = {{ .BaseConfig.BlockSyncMode }}
|
||||
|
||||
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
@@ -429,17 +429,17 @@ chunk_request_timeout = "{{ .StateSync.ChunkRequestTimeout }}"
|
||||
chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
|
||||
|
||||
#######################################################
|
||||
### Fast Sync Configuration Connections ###
|
||||
### Block Sync Configuration Options ###
|
||||
#######################################################
|
||||
[fastsync]
|
||||
[blocksync]
|
||||
|
||||
# Fast Sync version to use:
|
||||
# Block Sync version to use:
|
||||
#
|
||||
# In v0.37, v1 and v2 of the fast sync protocol were deprecated.
|
||||
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
|
||||
# Please use v0 instead.
|
||||
#
|
||||
# 1) "v0" - the default fast sync implementation
|
||||
version = "{{ .FastSync.Version }}"
|
||||
# 1) "v0" - the default block sync implementation
|
||||
version = "{{ .BlockSync.Version }}"
|
||||
|
||||
#######################################################
|
||||
### Consensus Configuration Options ###
|
||||
@@ -482,6 +482,16 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
|
||||
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
|
||||
#######################################################
|
||||
### Storage Configuration Options ###
|
||||
#######################################################
|
||||
|
||||
# Set to true to discard ABCI responses from the state store, which can save a
|
||||
# considerable amount of disk space. Set to false to ensure ABCI responses are
|
||||
# persisted. ABCI responses are required for /block_results RPC queries, and to
|
||||
# reindex events in the command-line tool.
|
||||
discard_abci_responses = {{ .Storage.DiscardABCIResponses}}
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
|
||||
@@ -50,7 +50,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
for i := 0; i < nValidators; i++ {
|
||||
logger := consensusLogger().With("test", "byzantine", "validator", i)
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
|
||||
@@ -426,7 +426,9 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
// Make State
|
||||
stateDB := blockDB
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
|
||||
panic(err)
|
||||
}
|
||||
@@ -716,7 +718,9 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
|
||||
configRootDirs := make([]string, 0, nValidators)
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
@@ -754,7 +758,9 @@ func randConsensusNetWithPeers(
|
||||
configRootDirs := make([]string, 0, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
|
||||
@@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) {
|
||||
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(blockDB)
|
||||
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
|
||||
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
|
||||
err := stateStore.Save(state)
|
||||
require.NoError(t, err)
|
||||
@@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
app := NewCounterApplication()
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(blockDB)
|
||||
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
|
||||
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
|
||||
err := stateStore.Save(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -118,11 +118,11 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Name: "latest_block_height",
|
||||
Help: "The latest block height.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "fast_syncing",
|
||||
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
|
||||
Name: "block_syncing",
|
||||
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
@@ -184,7 +184,7 @@ func NopMetrics() *Metrics {
|
||||
BlockSizeBytes: discard.NewGauge(),
|
||||
TotalTxs: discard.NewGauge(),
|
||||
CommittedHeight: discard.NewGauge(),
|
||||
FastSyncing: discard.NewGauge(),
|
||||
BlockSyncing: discard.NewGauge(),
|
||||
StateSyncing: discard.NewGauge(),
|
||||
BlockParts: discard.NewCounter(),
|
||||
StepDurationSeconds: discard.NewHistogram(),
|
||||
|
||||
@@ -59,8 +59,8 @@ type Metrics struct {
|
||||
TotalTxs metrics.Gauge
|
||||
// The latest block height.
|
||||
CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
|
||||
// Whether or not a node is fast syncing. 1 if yes, 0 if no.
|
||||
FastSyncing metrics.Gauge
|
||||
// Whether or not a node is block syncing. 1 if yes, 0 if no.
|
||||
BlockSyncing metrics.Gauge
|
||||
// Whether or not a node is state syncing. 1 if yes, 0 if no.
|
||||
StateSyncing metrics.Gauge
|
||||
|
||||
|
||||
@@ -314,7 +314,6 @@ func TestWALMsgProto(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll //ignore line length for tests
|
||||
func TestConsMsgsVectors(t *testing.T) {
|
||||
date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC)
|
||||
psh := types.PartSetHeader{
|
||||
|
||||
@@ -72,7 +72,7 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption)
|
||||
}
|
||||
|
||||
// OnStart implements BaseService by subscribing to events, which later will be
|
||||
// broadcasted to other peers and starting state if we're not in fast sync.
|
||||
// broadcasted to other peers and starting state if we're not in block sync.
|
||||
func (conR *Reactor) OnStart() error {
|
||||
conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())
|
||||
|
||||
@@ -104,8 +104,8 @@ func (conR *Reactor) OnStop() {
|
||||
}
|
||||
}
|
||||
|
||||
// SwitchToConsensus switches from fast_sync mode to consensus mode.
|
||||
// It resets the state, turns off fast_sync, and starts the consensus state-machine
|
||||
// SwitchToConsensus switches from block_sync mode to consensus mode.
|
||||
// It resets the state, turns off block_sync, and starts the consensus state-machine
|
||||
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
|
||||
conR.Logger.Info("SwitchToConsensus")
|
||||
|
||||
@@ -121,7 +121,7 @@ func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
|
||||
conR.mtx.Lock()
|
||||
conR.waitSync = false
|
||||
conR.mtx.Unlock()
|
||||
conR.Metrics.FastSyncing.Set(0)
|
||||
conR.Metrics.BlockSyncing.Set(0)
|
||||
conR.Metrics.StateSyncing.Set(0)
|
||||
|
||||
if skipWAL {
|
||||
@@ -198,7 +198,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) {
|
||||
go conR.queryMaj23Routine(peer, peerState)
|
||||
|
||||
// Send our state to peer.
|
||||
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
|
||||
// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
|
||||
if !conR.WaitSync() {
|
||||
conR.sendNewRoundStepMessage(peer)
|
||||
}
|
||||
@@ -218,7 +218,7 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
}
|
||||
|
||||
// Receive implements Reactor
|
||||
// NOTE: We process these messages even when we're fast_syncing.
|
||||
// NOTE: We process these messages even when we're block_syncing.
|
||||
// Messages affect either a peer state or the consensus state.
|
||||
// Peer state updates can happen in parallel, but processing of
|
||||
// proposals, block parts, and votes are ordered by the receiveRoutine
|
||||
@@ -386,7 +386,7 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) {
|
||||
conR.conS.SetEventBus(b)
|
||||
}
|
||||
|
||||
// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
|
||||
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
|
||||
func (conR *Reactor) WaitSync() bool {
|
||||
conR.mtx.RLock()
|
||||
defer conR.mtx.RUnlock()
|
||||
|
||||
@@ -138,7 +138,9 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
logger := consensusLogger()
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
@@ -689,7 +691,7 @@ func capture() {
|
||||
// Ensure basic validation of structs is functioning
|
||||
|
||||
func TestNewRoundStepMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct {
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageLastCommitRound int32
|
||||
@@ -728,7 +730,7 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) {
|
||||
|
||||
func TestNewRoundStepMessageValidateHeight(t *testing.T) {
|
||||
initialHeight := int64(10)
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageLastCommitRound int32
|
||||
messageHeight int64
|
||||
@@ -878,7 +880,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) {
|
||||
invalidSignedMsgType tmproto.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageIndex int32
|
||||
@@ -923,7 +925,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageHeight int64
|
||||
|
||||
@@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
case appBlockHeight == storeBlockHeight:
|
||||
// We ran Commit, but didn't save the state, so replayBlock with mock app.
|
||||
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
|
||||
abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
|
||||
@@ -158,7 +158,9 @@ LOOP:
|
||||
logger := log.NewNopLogger()
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := blockDB
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
@@ -289,7 +291,6 @@ func (w *crashingWAL) Start() error { return w.next.Start() }
|
||||
func (w *crashingWAL) Stop() error { return w.next.Stop() }
|
||||
func (w *crashingWAL) Wait() { w.next.Wait() }
|
||||
|
||||
//------------------------------------------------------------------------------------------
|
||||
type testSim struct {
|
||||
GenesisState sm.State
|
||||
Config *cfg.Config
|
||||
@@ -692,7 +693,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
|
||||
stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
|
||||
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
store.chain = chain
|
||||
store.commits = commits
|
||||
|
||||
@@ -711,7 +714,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
|
||||
// use a throwaway tendermint state
|
||||
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
|
||||
stateDB1 := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB1)
|
||||
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
err := stateStore.Save(genesisState)
|
||||
require.NoError(t, err)
|
||||
buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
|
||||
@@ -890,7 +895,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
stateDB, state, store := stateAndStore(config, pubKey, appVersion)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
state.LastValidators = state.Validators.Copy()
|
||||
// mode = 0 for committing all the blocks
|
||||
@@ -1147,7 +1154,9 @@ func stateAndStore(
|
||||
pubKey crypto.PubKey,
|
||||
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
|
||||
state.Version.Consensus.App = appVersion
|
||||
store := newMockBlockStore(config, state.ConsensusParams)
|
||||
@@ -1181,7 +1190,6 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
|
||||
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
|
||||
return bs.chain[int64(len(bs.chain))-1]
|
||||
}
|
||||
func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil }
|
||||
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
block := bs.chain[height-1]
|
||||
return &types.BlockMeta{
|
||||
@@ -1225,7 +1233,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
stateDB, state, store := stateAndStore(config, pubKey, 0x0)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
|
||||
oldValAddr := state.Validators.Validators[0].Address
|
||||
|
||||
|
||||
@@ -978,7 +978,9 @@ func (cs *State) handleTxsAvailable() {
|
||||
// Used internally by handleTimeout and handleMsg to make state transitions
|
||||
|
||||
// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
|
||||
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
|
||||
//
|
||||
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
|
||||
//
|
||||
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
|
||||
// Enter: +2/3 precommits for nil at (height,round-1)
|
||||
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
|
||||
@@ -1060,7 +1062,9 @@ func (cs *State) needProofBlock(height int64) bool {
|
||||
|
||||
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
|
||||
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
|
||||
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
|
||||
//
|
||||
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
|
||||
//
|
||||
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
|
||||
func (cs *State) enterPropose(height int64, round int32) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
@@ -1964,7 +1968,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
|
||||
// If the vote height is off, we'll just ignore it,
|
||||
// But if it's a conflicting sig, add it to the cs.evpool.
|
||||
// If it's otherwise invalid, punish peer.
|
||||
// nolint: gocritic
|
||||
//nolint: gocritic
|
||||
if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
|
||||
if cs.privValidatorPubKey == nil {
|
||||
return false, errPubKeyIsNotSet
|
||||
|
||||
@@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
}
|
||||
blockStoreDB := db.NewMemDB()
|
||||
stateDB := blockStoreDB
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, err := sm.MakeGenesisState(genDoc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make genesis state: %w", err)
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/openpgp/armor" // nolint: staticcheck
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
)
|
||||
|
||||
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
|
||||
|
||||
@@ -12,20 +12,19 @@ second pre-image attacks. Hence, use this library with caution.
|
||||
Otherwise you might run into similar issues as, e.g., in early Bitcoin:
|
||||
https://bitcointalk.org/?topic=102395
|
||||
|
||||
*
|
||||
/ \
|
||||
/ \
|
||||
/ \
|
||||
/ \
|
||||
* *
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
* * * h6
|
||||
/ \ / \ / \
|
||||
h0 h1 h2 h3 h4 h5
|
||||
*
|
||||
/ \
|
||||
/ \
|
||||
/ \
|
||||
/ \
|
||||
* *
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
* * * h6
|
||||
/ \ / \ / \
|
||||
h0 h1 h2 h3 h4 h5
|
||||
|
||||
TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.
|
||||
|
||||
*/
|
||||
package merkle
|
||||
|
||||
@@ -85,8 +85,8 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
|
||||
bz := new(bytes.Buffer)
|
||||
// Wrap <op.Key, vhash> to hash the KVPair.
|
||||
encodeByteSlice(bz, op.key) // nolint: errcheck // does not error
|
||||
encodeByteSlice(bz, vhash) // nolint: errcheck // does not error
|
||||
encodeByteSlice(bz, op.key)
|
||||
encodeByteSlice(bz, vhash)
|
||||
kvhash := leafHash(bz.Bytes())
|
||||
|
||||
if !bytes.Equal(kvhash, op.Proof.LeafHash) {
|
||||
|
||||
@@ -47,10 +47,10 @@ func HashFromByteSlices(items [][]byte) []byte {
|
||||
//
|
||||
// These preliminary results suggest:
|
||||
//
|
||||
// 1. The performance of the HashFromByteSlice is pretty good
|
||||
// 2. Go has low overhead for recursive functions
|
||||
// 3. The performance of the HashFromByteSlice routine is dominated
|
||||
// by the actual hashing of data
|
||||
// 1. The performance of the HashFromByteSlice is pretty good
|
||||
// 2. Go has low overhead for recursive functions
|
||||
// 3. The performance of the HashFromByteSlice routine is dominated
|
||||
// by the actual hashing of data
|
||||
//
|
||||
// Although this work is in no way exhaustive, point #3 suggests that
|
||||
// optimization of this routine would need to take an alternative
|
||||
|
||||
@@ -9,13 +9,12 @@ import (
|
||||
"math/big"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
"golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format
|
||||
"golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
)
|
||||
|
||||
//-------------------------------------
|
||||
const (
|
||||
PrivKeyName = "tendermint/PrivKeySecp256k1"
|
||||
PubKeyName = "tendermint/PubKeySecp256k1"
|
||||
@@ -124,8 +123,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {
|
||||
|
||||
// used to reject malleable signatures
|
||||
// see:
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
|
||||
@@ -60,5 +60,6 @@ sections.
|
||||
- [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst)
|
||||
- [RFC-021: The Future of the Socket Protocol](./rfc-021-socket-protocol.md)
|
||||
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md)
|
||||
- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md)
|
||||
|
||||
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||
|
||||
364
docs/rfc/rfc-024-block-structure-consolidation.md
Normal file
364
docs/rfc/rfc-024-block-structure-consolidation.md
Normal file
@@ -0,0 +1,364 @@
|
||||
# RFC 024: Block Structure Consolidation
|
||||
|
||||
## Changelog
|
||||
|
||||
- 19-Apr-2022: Initial draft started (@williambanfield).
|
||||
- 3-May-2022: Initial draft complete (@williambanfield).
|
||||
|
||||
## Abstract
|
||||
|
||||
The `Block` data structure is a very central structure within Tendermint. Because
|
||||
of its centrality, it has gained several fields over the years through accretion.
|
||||
Not all of these fields may be necessary any more. This document examines which
|
||||
of these fields may no longer be necessary for inclusion in the block and makes
|
||||
recommendations about how to proceed with each of them.
|
||||
|
||||
## Background
|
||||
|
||||
The current block structure contains multiple fields that are not required for
|
||||
validation or execution of a Tendermint block. Some of these fields had vestigial
|
||||
purposes that they no longer serve and some of these fields exist as a result of
|
||||
internal Tendermint domain objects leaking out into the external data structure.
|
||||
|
||||
In so far as is possible, we should consolidate and prune these superfluous
|
||||
fields before releasing a 1.0 version of Tendermint. All pruning of these
|
||||
fields should be done with the aim of simplifying the structures to what
|
||||
is needed while preserving information that aids with debugging and that also
|
||||
allow external protocols to function more efficiently than if they were removed.
|
||||
|
||||
### Current Block Structure
|
||||
|
||||
The current block structures are included here to aid discussion.
|
||||
|
||||
```proto
|
||||
message Block {
|
||||
Header header = 1;
|
||||
Data data = 2;
|
||||
tendermint.types.EvidenceList evidence = 3;
|
||||
Commit last_commit = 4;
|
||||
}
|
||||
```
|
||||
|
||||
```proto
|
||||
message Header {
|
||||
tendermint.version.Consensus version = 1;
|
||||
string chain_id = 2;
|
||||
int64 height = 3;
|
||||
google.protobuf.Timestamp time = 4;
|
||||
BlockID last_block_id = 5;
|
||||
bytes last_commit_hash = 6;
|
||||
bytes data_hash = 7;
|
||||
bytes validators_hash = 8;
|
||||
bytes next_validators_hash = 9;
|
||||
bytes consensus_hash = 10;
|
||||
bytes app_hash = 11;
|
||||
bytes last_results_hash = 12;
|
||||
bytes evidence_hash = 13;
|
||||
bytes proposer_address = 14;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```proto
|
||||
message Data {
|
||||
repeated bytes txs = 1;
|
||||
}
|
||||
```
|
||||
|
||||
```proto
|
||||
message EvidenceList {
|
||||
repeated Evidence evidence = 1;
|
||||
}
|
||||
```
|
||||
|
||||
```proto
|
||||
message Commit {
|
||||
int64 height = 1;
|
||||
int32 round = 2;
|
||||
BlockID block_id = 3;
|
||||
repeated CommitSig signatures = 4;
|
||||
}
|
||||
```
|
||||
|
||||
```proto
|
||||
message CommitSig {
|
||||
BlockIDFlag block_id_flag = 1;
|
||||
bytes validator_address = 2;
|
||||
google.protobuf.Timestamp timestamp = 3;
|
||||
bytes signature = 4;
|
||||
}
|
||||
```
|
||||
|
||||
```proto
|
||||
message BlockID {
|
||||
bytes hash = 1;
|
||||
PartSetHeader part_set_header = 2;
|
||||
}
|
||||
```
|
||||
|
||||
### On Tendermint Blocks
|
||||
|
||||
#### What is a Tendermint 'Block'?
|
||||
|
||||
A block is the structure produced as the result of an instance of the Tendermint
|
||||
consensus algorithm. At its simplest, the 'block' can be represented as a Merkle
|
||||
root hash of all of the data used to construct and produce the hash. Our current
|
||||
block proto structure includes _far_ from all of the data used to produce the
|
||||
hashes included in the block.
|
||||
|
||||
It does not contain the full `AppState`, it does not contain the `ConsensusParams`,
|
||||
nor the `LastResults`, nor the `ValidatorSet`. Additionally, the layout of
|
||||
the block structure is not inherently tied to this Merkle root hash. Different
|
||||
layouts of the same set of data could trivially be used to construct the
|
||||
exact same hash. The thing we currently call the 'Block' is really just a view
|
||||
into a subset of the data used to construct the root hash. Sections of the
|
||||
structure can be modified as long as alternative methods exist to query and
|
||||
retrieve the constituent values.
|
||||
|
||||
#### Why this digression?
|
||||
|
||||
This digression is aimed at informing what it means to consolidate 'fields' in the
|
||||
'block'. The discussion of what should be included in the block can be teased
|
||||
apart into a few different lines of inquiry.
|
||||
|
||||
1. What values need to be included as part of the Merkle tree so that the
|
||||
consensus algorithm can use proof-of-stake consensus to validate all of the
|
||||
properties of the chain that we would like?
|
||||
2. How can we create views of the data that can be easily retrieved, stored, and
|
||||
verified by the relevant protocols?
|
||||
|
||||
These two concerns are intertwined at the moment as a result of how we store
|
||||
and propagate our data but they don't necessarily need to be. This document
|
||||
focuses primarily on the first concern by suggesting fields that can be
|
||||
completely removed without any loss in the function of our consensus algorithm.
|
||||
|
||||
This document also suggests ways that we may update our storage and propagation
|
||||
mechanisms to better take advantage of Merkle tree nature of our data although
|
||||
these are not its primary concern.
|
||||
|
||||
## Discussion
|
||||
|
||||
### Data to consider removing
|
||||
|
||||
This section proposes a list of data that could be completely removed from the
|
||||
Merkle tree with no loss to the functionality of our consensus algorithm.
|
||||
|
||||
Where the change is possible but would hamper external protocols or make
|
||||
debugging more difficult, that is noted in discussion.
|
||||
|
||||
#### CommitSig.Timestamp
|
||||
|
||||
This field contains the timestamp included in the precommit message that was
|
||||
issued for the block. The field was once used to produce the timestamp of the block.
|
||||
With the introduction of Proposer-Based Timestamps, This field is no longer used
|
||||
in any Tendermint algorithms and can be completely removed.
|
||||
|
||||
#### CommitSig.ValidatorAddress
|
||||
|
||||
The `ValidatorAddress` is included in each `CommitSig` structure. This field
|
||||
is hashed along with all of the other fields of the `CommitSig`s in the block
|
||||
to form the `LastCommitHash` field in the `Header`. The `ValidatorAddress` is
|
||||
somewhat redundant in the hash. Each validator has a unique position in the
|
||||
`CommitSig` and the hash is built preserving this order. Therefore, the
|
||||
information of which signature corresponds to which validator is included in
|
||||
the root hash, even if the address is absent.
|
||||
|
||||
It's worth noting that the validator address could still be included in the
|
||||
_hash_ even if it is absent from the `CommitSig` structure in the block by
|
||||
simply hashing it locally at each validator but not including it in the block.
|
||||
The reverse is also true. It would be perfectly possible to not include the
|
||||
`ValidatorAddress` data in the `LastCommitHash` but still include the field in
|
||||
the block.
|
||||
|
||||
#### BlockID.PartSetHeader
|
||||
|
||||
The [BlockID][block-id] field comprises the [PartSetHeader][part-set-header] and the hash of the block.
|
||||
The `PartSetHeader` is used by nodes to gossip the block by dividing it into
|
||||
parts. Nodes receive the `PartSetHeader` from their peers, informing them of
|
||||
what pieces of the block to gather. There is no strong reason to include this
|
||||
value in the block. Validators will still be able to gossip and validate the
|
||||
blocks that they received from their peers using this mechanism even if it is
|
||||
not written into the block. The `BlockID` can therefore be consolidated into
|
||||
just the hash of the block. This is by far the most uncontroversial change
|
||||
and there appears to be no good reason _not_ to do it. Further evidence that
|
||||
the field is not meaningful can be found in the fact that the field is not
|
||||
actually validated to ensure it is correct during block validation. Validation
|
||||
only checks that the [field is well formed][psh-check].
|
||||
|
||||
#### ChainID
|
||||
|
||||
The `ChainID` is a string selected by the chain operators, usually a
|
||||
human-readable name for the network. This value is immutable for the lifetime
|
||||
of the chain and is defined in the genesis file. It is therefore hashed into the
|
||||
original block and therefore transitively included as in the Merkle root hash of
|
||||
every block. The redundant field is a candidate for removal from the root hash
|
||||
of each block. However, aesthetically, it's somewhat nice to include in each
|
||||
block, as if the block was 'stamped' with the ID. Additionally, re-validating
|
||||
the value from genesis would be painful and require reconstituting potentially
|
||||
large chains. I'm therefore mildly in favor of maintaining this redundant
|
||||
piece of information. We pay almost no storage cost for maintaining this
|
||||
identical data, so the only cost is in the time required to hash it into the
|
||||
structure.
|
||||
|
||||
#### LastResultsHash
|
||||
|
||||
`LastResultsHash` is a hash covering the result of executing the transactions
|
||||
from the previous block. It covers the response `Code`, `Data`, `GasWanted`,
|
||||
and `GasUsed` with the aim of ensuring that execution of all of the transactions
|
||||
was performed identically on each node. The data covered by this field _should_
|
||||
be also reflected in the `AppHash`. The `AppHash` is provided by the application
|
||||
and should be deterministically calculated by each node. This field could
|
||||
therefore be removed on the grounds that its data is already reflected elsewhere.
|
||||
|
||||
I would advocate for keeping this field. This field provides an additional check
|
||||
for determinism across nodes. Logic to update the application hash is more
|
||||
complicated for developers to implement because it relies either on building a
|
||||
complete view of the state of the application data. The `Results` returned by
|
||||
the application contain simple response codes and deterministic data bytes.
|
||||
Leaving the field will allow for transaction execution issues that are not
|
||||
correctly reflected in the `AppHash` to be more completely diagnosed.
|
||||
|
||||
Take the case of mismatch of `LastResultsHash` between two nodes, A and B, where both
|
||||
nodes report divergent values. If `A` and `B` both report
|
||||
the same `AppHash`, then some non-deterministic behavior occurred that was not
|
||||
accurately reflected in the `AppHash`. The issue caused by this
|
||||
non-determinism may not show itself for several blocks, but catching the divergent
|
||||
state earlier will improve the chances that a chain is able to recover.
|
||||
|
||||
#### ValidatorsHash
|
||||
|
||||
Both `ValidatorsHash` and `NextValidatorsHash` are included in the block
|
||||
header. `Validatorshash` contains the hash of the [public key and voting power][val-hash]
|
||||
of each validator in the active set for the current block and `NextValidatorsHash`
|
||||
contains the same data but for the next height.
|
||||
|
||||
This data is effectively redundant. Having both values present in the block
|
||||
structure is helpful for light client verification. The light client is able to
|
||||
easily determine if two sequential blocks used the same validator set by querying
|
||||
only one header.
|
||||
|
||||
`ValidatorsHash` is also important to the light client algorithm for performing block
|
||||
validation. The light client uses this field to ensure that the validator set
|
||||
it fetched from a full node is correct. It can be sure of the correctness of
|
||||
the retrieved structure by hashing it and checking the hash against the `ValidatorsHash`
|
||||
of the block it is verifying. Because a validator that the light client trusts
|
||||
signed over the `ValidatorsHash`, it can be certain of the validity of the
|
||||
structure. Without this check, phony validator sets could be handed to the light
|
||||
client and the code tricked into believing a different validator set was present
|
||||
at a height, opening up a major hole in the light client security model.
|
||||
|
||||
This creates a recursive problem. To verify the validator set that signed the
|
||||
block at height `H`, what information do we need? We could fetch the
|
||||
`NextValidatorsHash` from height `H-1`, but how do we verify that that hash is correct?
|
||||
|
||||
#### ProposerAddress
|
||||
|
||||
The section below details a change to allow the `ProposerAddress` to be calculated
|
||||
from a field added to the block. This would allow the `Address` to be dropped
|
||||
from the block. Consumers of the chain could run the proposer selection [algorithm][proposer-selection]
|
||||
to determine who proposed each block.
|
||||
|
||||
I would advocate against this. Any consumer of the chain that wanted to
|
||||
know which validator proposed a block would have to run the proposer selection
|
||||
algorithm. This algorithm is not widely implemented, meaning that consumers
|
||||
in other languages would need to implement the algorithm to determine a piece
|
||||
of basic information about the chain.
|
||||
|
||||
### Data to consider adding
|
||||
|
||||
#### ProofOfLockRound
|
||||
|
||||
The *proof of lock round* is the round of consensus for a height in which the
|
||||
Tendermint algorithm observed a super majority of voting power on the network for
|
||||
a block.
|
||||
|
||||
Including this value in the block will allow validation of currently
|
||||
un-validated metadata. Specifically, including this value will allow Tendermint
|
||||
to validate that the `ProposerAddress` in the block is correct. Without knowing
|
||||
the locked round number, Tendermint cannot calculate which validator was supposed
|
||||
to propose a height. Because of this, our [validation logic][proposer-check] does not check that
|
||||
the `ProposerAddress` included in the block corresponds to the validator that
|
||||
proposed the height. Instead, the validation logic simply checks that the value
|
||||
is an address of one of the known validators.
|
||||
|
||||
Currently, we maintain the _committed round_ in the `Commit` for height `H`, which is
|
||||
written into the block at height `H+1`. This value corresponds to the round in
|
||||
which the proposer of height `H+1` received the commit for height `H`. The proof
|
||||
of lock round would not subsume this value.
|
||||
|
||||
### Additional possible updates
|
||||
|
||||
#### Updates to storage
|
||||
|
||||
Currently we store the [every piece of each block][save-block] in the `BlockStore`.
|
||||
I suspect that this has led to some mistakes in reasoning around the merits of
|
||||
consolidating fields in the block. We could update the storage scheme we use to
|
||||
store only some pieces of each block and still achieve a space savings without having
|
||||
to change the block structure at all.
|
||||
|
||||
The main way to achieve this would be by _no longer saving data that does not change_.
|
||||
At each height we save a set of data that is unlikely to have changed from the
|
||||
previous height in the block structure, this includes the `ValidatorAddress`es,
|
||||
the `ValidatorsHash`, the `ChainID`. These do not need to be saved along with
|
||||
_each_ block. We could easily save the value and the height at which the value
|
||||
was updated and construct each block using the data that existed at the time.
|
||||
|
||||
This document does not make any specific recommendations around storage since
|
||||
that is likely to change with upcoming improvements to the database infrastructure.
|
||||
However, it's important to note that removing fields from the block for the
|
||||
purposes of 'saving space' may not be that meaningful. We should instead focus
|
||||
our attention on removing fields from the block that are no longer needed
|
||||
for correct functioning of the protocol.
|
||||
|
||||
#### Updates to propagation
|
||||
|
||||
Block propagation suffers from the same issue that plagues block storage, we
|
||||
propagate all of the contents of each block proto _even when these contents are redundant
|
||||
or unchanged from previous blocks_. For example, we propagate the `ValidatorAddress`es
|
||||
for each block in the `CommitSig` structure even when it never changed from a
|
||||
previous height. We could achieve a speed-up in many cases by communicating the
|
||||
hashes _first_ and letting peers request additional information when they do not
|
||||
recognize the communicated hash.
|
||||
|
||||
For example, in the case of the `ValidatorAddress`es, the node would first
|
||||
communicate the `ValidatorsHash` of the block to its peers. The peers would
|
||||
check their storage for a validator set matching the provided hash. If the peer
|
||||
has a matching set, it would populate its local block structure with the
|
||||
appropriate values from its store. If the peer did not have a matching set, it would
|
||||
issue a request to its peers, either via P2P or RPC for the data it did not have.
|
||||
|
||||
Conceptually, this is very similar to how content addressing works in protocols
|
||||
such as git where pushing a commit does not require pushing the entire contents
|
||||
of the tree referenced by the commit.
|
||||
|
||||
### Impact on light clients
|
||||
|
||||
As outlined in the section [On Tendermint Blocks](#on-tendermint-blocks), there
|
||||
is a distinction between what data is referenced in the Merkle root hash and the
|
||||
contents of the proto structure we currently call the `Block`.
|
||||
|
||||
Any changes to the Merkle root hash will necessarily be breaking for legacy light clients.
|
||||
Either a soft-upgrades scheme will need to be implemented or a hard fork will
|
||||
be required for chains and light clients to function with the new hashes.
|
||||
This means that all of the additions and deletions from the Merkle root hash
|
||||
proposed by this document will be light client breaking.
|
||||
|
||||
Changes to the block structure alone are not necessarily light client breaking if the
|
||||
data being hashed are identical and legacy views into the data are provided
|
||||
for old light clients during transitions. For example, a newer version of the
|
||||
block structure could move the `ValidatorAddress` field to a different field
|
||||
in the block while still including it in the hashed data of the `LastCommitHash`.
|
||||
As long as old light clients could still fetch the old data structure, then
|
||||
this would not be light client breaking.
|
||||
|
||||
## References
|
||||
|
||||
[light-verify-trusting]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/validation.go#L124
|
||||
[part-set-header]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/part_set.go#L94
|
||||
[block-id]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/block.go#L1090
|
||||
[psh-check]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/part_set.go#L116
|
||||
[proposer-selection]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/spec/consensus/proposer-selection.md
|
||||
[chain-experiment]: https://github.com/williambanfield/tmtools/blob/master/hash-changes/RUN.txt
|
||||
[val-hash]: https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/types/validator.go#L160
|
||||
[proposer-check]: https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/internal/state/validation.go#L102
|
||||
[save-block]: https://github.com/tendermint/tendermint/blob/59f0236b845c83009bffa62ed44053b04370b8a9/internal/store/store.go#L490
|
||||
@@ -17,7 +17,7 @@ This section dives into the internals of Tendermint the implementation.
|
||||
- [Subscribing to events](./subscription.md)
|
||||
- [Block Structure](./block-structure.md)
|
||||
- [RPC](./rpc.md)
|
||||
- [Fast Sync](./fast-sync.md)
|
||||
- [Block Sync](./block-sync.md)
|
||||
- [State Sync](./state-sync.md)
|
||||
- [Mempool](./mempool.md)
|
||||
- [Light Client](./light-client.md)
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
order: 10
|
||||
---
|
||||
|
||||
# Fast Sync
|
||||
# Block Sync
|
||||
*Formerly known as Fast Sync*
|
||||
|
||||
In a proof of work blockchain, syncing with the chain is the same
|
||||
process as staying up-to-date with the consensus: download blocks, and
|
||||
@@ -14,35 +15,37 @@ scratch can take a very long time. It's much faster to just download
|
||||
blocks and check the merkle tree of validators than to run the real-time
|
||||
consensus gossip protocol.
|
||||
|
||||
## Using Fast Sync
|
||||
## Using Block Sync
|
||||
|
||||
To support faster syncing, Tendermint offers a `fast-sync` mode, which
|
||||
To support faster syncing, Tendermint offers a `block-sync` mode, which
|
||||
is enabled by default, and can be toggled in the `config.toml` or via
|
||||
`--fast_sync=false`.
|
||||
`--block_sync=false`.
|
||||
|
||||
In this mode, the Tendermint daemon will sync hundreds of times faster
|
||||
than if it used the real-time consensus process. Once caught up, the
|
||||
daemon will switch out of fast sync and into the normal consensus mode.
|
||||
daemon will switch out of Block Sync and into the normal consensus mode.
|
||||
After running for some time, the node is considered `caught up` if it
|
||||
has at least one peer and its height is at least as high as the max
|
||||
reported peer height. See [the IsCaughtUp
|
||||
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
|
||||
|
||||
Note: There are three versions of fast sync. We recommend using v0 as v1 and v2 are still in beta.
|
||||
If you would like to use a different version you can do so by changing the version in the `config.toml`:
|
||||
Note: While there have historically been multiple versions of blocksync, v0, v1, and v2, all versions
|
||||
other than v0 have been deprecated in favor of the simplest and most well understood algorithm.
|
||||
|
||||
```toml
|
||||
#######################################################
|
||||
### Fast Sync Configuration Connections ###
|
||||
### Block Sync Configuration Options ###
|
||||
#######################################################
|
||||
[fastsync]
|
||||
[blocksync]
|
||||
|
||||
# Fast Sync version to use:
|
||||
# 1) "v0" (default) - the legacy fast sync implementation
|
||||
# 2) "v1" - refactor of v0 version for better testability
|
||||
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
||||
# Block Sync version to use:
|
||||
#
|
||||
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
|
||||
# Please use v0 instead.
|
||||
#
|
||||
# 1) "v0" - the default block sync implementation
|
||||
version = "v0"
|
||||
```
|
||||
|
||||
If we're lagging sufficiently, we should go back to fast syncing, but
|
||||
If we're lagging sufficiently, we should go back to block syncing, but
|
||||
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
|
||||
@@ -36,10 +36,10 @@ proxy_app = "tcp://127.0.0.1:26658"
|
||||
# A custom human readable name for this node
|
||||
moniker = "anonymous"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
block_sync = true
|
||||
|
||||
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
@@ -319,16 +319,16 @@ trust_period = "0s"
|
||||
temp_dir = ""
|
||||
|
||||
#######################################################
|
||||
### Fast Sync Configuration Connections ###
|
||||
### Block Sync Configuration Options ###
|
||||
#######################################################
|
||||
[fastsync]
|
||||
[blocksync]
|
||||
|
||||
# Fast Sync version to use:
|
||||
#
|
||||
# In v0.37, the v1 and v2 fast sync protocols were deprecated.
|
||||
# Block Sync version to use:
|
||||
#
|
||||
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
|
||||
# Please use v0 instead.
|
||||
#
|
||||
# 1) "v0" (default) - the legacy fast sync implementation
|
||||
# 1) "v0" - the default block sync implementation
|
||||
version = "v0"
|
||||
|
||||
#######################################################
|
||||
|
||||
@@ -37,7 +37,7 @@ The following metrics are available:
|
||||
| consensus_total_txs | Gauge | | Total number of transactions committed |
|
||||
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
|
||||
| consensus_latest_block_height | gauge | | /status sync_info number |
|
||||
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus_block_syncing | gauge | | either 0 (not block syncing) or 1 (syncing) |
|
||||
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | | Block size in bytes |
|
||||
| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
|
||||
|
||||
@@ -4,7 +4,7 @@ order: 11
|
||||
|
||||
# State Sync
|
||||
|
||||
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
|
||||
With block sync a node is downloading all of the data of an application from genesis and verifying it.
|
||||
With state sync your node will download data related to the head or near the head of the chain and verify the data.
|
||||
This leads to drastically shorter times for joining a network.
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ Package evidence handles all evidence storage and gossiping from detection to bl
|
||||
For the different types of evidence refer to the `evidence.go` file in the types package
|
||||
or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.
|
||||
|
||||
Gossiping
|
||||
# Gossiping
|
||||
|
||||
The core functionality begins with the evidence reactor (see reactor.
|
||||
go) which operates both the sending and receiving of evidence.
|
||||
@@ -29,7 +29,7 @@ There are two buckets that evidence can be stored in: Pending & Committed.
|
||||
|
||||
All evidence is proto encoded to disk.
|
||||
|
||||
Proposing
|
||||
# Proposing
|
||||
|
||||
When a new block is being proposed (in state/execution.go#CreateProposalBlock),
|
||||
`PendingEvidence(maxBytes)` is called to send up to the maxBytes of uncommitted evidence, from the evidence store,
|
||||
@@ -42,12 +42,11 @@ Once the proposed evidence is submitted,
|
||||
the evidence is marked as committed and is moved from the broadcasted set to the committed set.
|
||||
As a result it is also removed from the concurrent list so that it is no longer gossiped.
|
||||
|
||||
Minor Functionality
|
||||
# Minor Functionality
|
||||
|
||||
As all evidence (including POLC's) are bounded by an expiration date, those that exceed this are no longer needed
|
||||
and hence pruned. Currently, only committed evidence is saved, stored as a marker to the height at which the evidence was committed
|
||||
and hence very small. All updates are made from the `Update(block, state)` function which should be called
|
||||
when a new block is committed.
|
||||
|
||||
*/
|
||||
package evidence
|
||||
|
||||
@@ -97,11 +97,11 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) {
|
||||
|
||||
// Update takes both the new state and the evidence committed at that height and performs
|
||||
// the following operations:
|
||||
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
|
||||
// DuplicateVoteEvidence and add it to the pool.
|
||||
// 2. Update the pool's state which contains evidence params relating to expiry.
|
||||
// 3. Moves pending evidence that has now been committed into the committed pool.
|
||||
// 4. Removes any expired evidence based on both height and time.
|
||||
// 1. Take any conflicting votes from consensus and use the state's LastBlockTime to form
|
||||
// DuplicateVoteEvidence and add it to the pool.
|
||||
// 2. Update the pool's state which contains evidence params relating to expiry.
|
||||
// 3. Moves pending evidence that has now been committed into the committed pool.
|
||||
// 4. Removes any expired evidence based on both height and time.
|
||||
func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) {
|
||||
// sanity check
|
||||
if state.LastBlockHeight <= evpool.state.LastBlockHeight {
|
||||
|
||||
@@ -348,7 +348,9 @@ func TestRecoverPendingEvidence(t *testing.T) {
|
||||
|
||||
func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store {
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state := sm.State{
|
||||
ChainID: evidenceChainID,
|
||||
InitialHeight: 1,
|
||||
|
||||
@@ -367,7 +367,6 @@ func exampleVote(t byte) *types.Vote {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll //ignore line length for tests
|
||||
func TestEvidenceVectors(t *testing.T) {
|
||||
|
||||
val := &types.Validator{
|
||||
|
||||
@@ -102,13 +102,13 @@ func (evpool *Pool) verify(evidence types.Evidence) error {
|
||||
|
||||
// VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. This involves
|
||||
// the following checks:
|
||||
// - the common header from the full node has at least 1/3 voting power which is also present in
|
||||
// the conflicting header's commit
|
||||
// - 2/3+ of the conflicting validator set correctly signed the conflicting block
|
||||
// - the nodes trusted header at the same height as the conflicting header has a different hash
|
||||
//
|
||||
// - the common header from the full node has at least 1/3 voting power which is also present in
|
||||
// the conflicting header's commit
|
||||
// - 2/3+ of the conflicting validator set correctly signed the conflicting block
|
||||
// - the nodes trusted header at the same height as the conflicting header has a different hash
|
||||
|
||||
// CONTRACT: must run ValidateBasic() on the evidence before verifying
|
||||
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
|
||||
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
|
||||
func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,
|
||||
commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {
|
||||
// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single
|
||||
@@ -154,10 +154,10 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t
|
||||
|
||||
// VerifyDuplicateVote verifies DuplicateVoteEvidence against the state of full node. This involves the
|
||||
// following checks:
|
||||
// - the validator is in the validator set at the height of the evidence
|
||||
// - the height, round, type and validator address of the votes must be the same
|
||||
// - the block ID's must be different
|
||||
// - The signatures must both be valid
|
||||
// - the validator is in the validator set at the height of the evidence
|
||||
// - the height, round, type and validator address of the votes must be the same
|
||||
// - the block ID's must be different
|
||||
// - The signatures must both be valid
|
||||
func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error {
|
||||
_, val := valSet.GetByAddress(e.VoteA.ValidatorAddress)
|
||||
if val == nil {
|
||||
|
||||
4
go.mod
4
go.mod
@@ -268,4 +268,6 @@ require (
|
||||
mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect
|
||||
)
|
||||
|
||||
retract [v0.35.0, v0.35.9] // See https://github.com/tendermint/tendermint/discussions/9155
|
||||
retract (
|
||||
[v0.35.0,v0.35.9] // See https://github.com/tendermint/tendermint/discussions/9155
|
||||
)
|
||||
|
||||
1
go.sum
1
go.sum
@@ -1031,6 +1031,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
|
||||
@@ -17,7 +17,8 @@ const (
|
||||
// all other modules).
|
||||
//
|
||||
// Example:
|
||||
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
|
||||
//
|
||||
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info")
|
||||
func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) {
|
||||
if lvl == "" {
|
||||
return nil, errors.New("empty log level")
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
const MaxLength = int(^uint(0) >> 1)
|
||||
|
||||
/*
|
||||
|
||||
CElement is an element of a linked-list
|
||||
Traversal from a CElement is goroutine-safe.
|
||||
|
||||
@@ -41,7 +40,6 @@ the for-loop. Use sync.Cond when you need serial access to the
|
||||
"condition". In our case our condition is if `next != nil || removed`,
|
||||
and there's no reason to serialize that condition for goroutines
|
||||
waiting on NextWait() (since it's just a read operation).
|
||||
|
||||
*/
|
||||
type CElement struct {
|
||||
mtx tmsync.RWMutex
|
||||
|
||||
@@ -68,7 +68,6 @@ func TestSmall(t *testing.T) {
|
||||
|
||||
// This test is quite hacky because it relies on SetFinalizer
|
||||
// which isn't guaranteed to run at all.
|
||||
//nolint:unused,deadcode
|
||||
func _TestGCFifo(t *testing.T) {
|
||||
if runtime.GOARCH != "amd64" {
|
||||
t.Skipf("Skipping on non-amd64 machine")
|
||||
@@ -117,6 +116,7 @@ func _TestGCFifo(t *testing.T) {
|
||||
|
||||
// This test is quite hacky because it relies on SetFinalizer
|
||||
// which isn't guaranteed to run at all.
|
||||
//
|
||||
//nolint:unused,deadcode
|
||||
func _TestGCRandom(t *testing.T) {
|
||||
if runtime.GOARCH != "amd64" {
|
||||
|
||||
@@ -39,10 +39,10 @@ type Monitor struct {
|
||||
// weight of each sample in the exponential moving average (EMA) calculation.
|
||||
// The exact formulas are:
|
||||
//
|
||||
// sampleTime = currentTime - prevSampleTime
|
||||
// sampleRate = byteCount / sampleTime
|
||||
// weight = 1 - exp(-sampleTime/windowSize)
|
||||
// newRate = weight*sampleRate + (1-weight)*oldRate
|
||||
// sampleTime = currentTime - prevSampleTime
|
||||
// sampleRate = byteCount / sampleTime
|
||||
// weight = 1 - exp(-sampleTime/windowSize)
|
||||
// newRate = weight*sampleRate + (1-weight)*oldRate
|
||||
//
|
||||
// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
|
||||
// respectively.
|
||||
|
||||
@@ -13,12 +13,12 @@
|
||||
// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit
|
||||
// precision):
|
||||
//
|
||||
// int32(32) // Output: 32
|
||||
// uint32(32) // Output: 32
|
||||
// int64(64) // Output: "64"
|
||||
// uint64(64) // Output: "64"
|
||||
// int(64) // Output: "64"
|
||||
// uint(64) // Output: "64"
|
||||
// int32(32) // Output: 32
|
||||
// uint32(32) // Output: 32
|
||||
// int64(64) // Output: "64"
|
||||
// uint64(64) // Output: "64"
|
||||
// int(64) // Output: "64"
|
||||
// uint(64) // Output: "64"
|
||||
//
|
||||
// Encoding of other scalars follows encoding/json:
|
||||
//
|
||||
@@ -50,7 +50,7 @@
|
||||
// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero
|
||||
// times emitted as "0001-01-01T00:00:00Z" as with encoding/json):
|
||||
//
|
||||
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
|
||||
// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60))
|
||||
// // Output: "2020-06-08T14:21:28.000000123Z"
|
||||
// time.Time{} // Output: "0001-01-01T00:00:00Z"
|
||||
// (*time.Time)(nil) // Output: null
|
||||
@@ -95,5 +95,4 @@
|
||||
//
|
||||
// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}}
|
||||
// // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}}
|
||||
//
|
||||
package json
|
||||
|
||||
@@ -69,18 +69,19 @@ func (l *filter) Error(msg string, keyvals ...interface{}) {
|
||||
// Allow*With methods, it is used as the logger's level.
|
||||
//
|
||||
// Examples:
|
||||
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
|
||||
// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
|
||||
//
|
||||
// logger = log.NewFilter(logger, log.AllowError(),
|
||||
// log.AllowInfoWith("module", "crypto"),
|
||||
// log.AllowNoneWith("user", "Sam"))
|
||||
// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil
|
||||
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
|
||||
// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
|
||||
//
|
||||
// logger = log.NewFilter(logger,
|
||||
// log.AllowError(),
|
||||
// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
|
||||
// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
|
||||
// logger = log.NewFilter(logger, log.AllowError(),
|
||||
// log.AllowInfoWith("module", "crypto"),
|
||||
// log.AllowNoneWith("user", "Sam"))
|
||||
// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil
|
||||
//
|
||||
// logger = log.NewFilter(logger,
|
||||
// log.AllowError(),
|
||||
// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
|
||||
// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
|
||||
func (l *filter) With(keyvals ...interface{}) Logger {
|
||||
keyInAllowedKeyvals := false
|
||||
|
||||
|
||||
@@ -22,9 +22,9 @@ type Logger interface {
|
||||
//
|
||||
// If w implements the following interface, so does the returned writer.
|
||||
//
|
||||
// interface {
|
||||
// Fd() uintptr
|
||||
// }
|
||||
// interface {
|
||||
// Fd() uintptr
|
||||
// }
|
||||
func NewSyncWriter(w io.Writer) io.Writer {
|
||||
return kitlog.NewSyncWriter(w)
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
|
||||
switch keyvals[i] {
|
||||
case kitlevel.Key():
|
||||
excludeIndexes = append(excludeIndexes, i)
|
||||
switch keyvals[i+1].(type) { // nolint:gocritic
|
||||
switch keyvals[i+1].(type) { //nolint:gocritic
|
||||
case string:
|
||||
lvl = keyvals[i+1].(string)
|
||||
case kitlevel.Value:
|
||||
|
||||
@@ -83,7 +83,6 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log
|
||||
}
|
||||
}
|
||||
|
||||
//nolint: errcheck // ignore errors
|
||||
var (
|
||||
baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") }
|
||||
withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") }
|
||||
|
||||
@@ -12,26 +12,25 @@
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// q, err := query.New("account.name='John'")
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second)
|
||||
// defer cancel()
|
||||
// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case msg <- subscription.Out():
|
||||
// // handle msg.Data() and msg.Events()
|
||||
// case <-subscription.Cancelled():
|
||||
// return subscription.Err()
|
||||
// }
|
||||
// }
|
||||
// q, err := query.New("account.name='John'")
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second)
|
||||
// defer cancel()
|
||||
// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case msg <- subscription.Out():
|
||||
// // handle msg.Data() and msg.Events()
|
||||
// case <-subscription.Cancelled():
|
||||
// return subscription.Err()
|
||||
// }
|
||||
// }
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package query provides a parser for a custom query format:
|
||||
//
|
||||
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
|
||||
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
|
||||
//
|
||||
// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar.
|
||||
// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics
|
||||
|
||||
@@ -43,7 +43,6 @@ func (s *Subscription) Out() <-chan Message {
|
||||
return s.out
|
||||
}
|
||||
|
||||
// nolint: misspell
|
||||
// Cancelled returns a channel that's closed when the subscription is
|
||||
// terminated and supposed to be used in a select statement.
|
||||
func (s *Subscription) Cancelled() <-chan struct{} {
|
||||
@@ -54,7 +53,8 @@ func (s *Subscription) Cancelled() <-chan struct{} {
|
||||
// If the channel is closed, Err returns a non-nil error explaining why:
|
||||
// - ErrUnsubscribed if the subscriber choose to unsubscribe,
|
||||
// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough
|
||||
// and the channel returned by Out became full,
|
||||
// and the channel returned by Out became full,
|
||||
//
|
||||
// After Err returns a non-nil error, successive calls to Err return the same
|
||||
// error.
|
||||
func (s *Subscription) Err() error {
|
||||
|
||||
@@ -48,7 +48,7 @@ func (r *Rand) init() {
|
||||
}
|
||||
|
||||
func (r *Rand) reset(seed int64) {
|
||||
r.rand = mrand.New(mrand.NewSource(seed)) // nolint:gosec // G404: Use of weak random number generator
|
||||
r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec // G404: Use of weak random number generator
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
@@ -284,16 +284,16 @@ func (c *Client) restoreTrustedLightBlock() error {
|
||||
|
||||
// if options.Height:
|
||||
//
|
||||
// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as
|
||||
// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as
|
||||
// trustedLightBlock) from primary provider and check it's hash matches the
|
||||
// trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks
|
||||
// before)
|
||||
//
|
||||
// 2) equals trustedLightBlock.Height => check options.Hash matches the
|
||||
// 2) equals trustedLightBlock.Height => check options.Hash matches the
|
||||
// trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks
|
||||
// before)
|
||||
//
|
||||
// 3) behind trustedLightBlock.Height => remove all the light blocks between
|
||||
// 3) behind trustedLightBlock.Height => remove all the light blocks between
|
||||
// options.Height and trustedLightBlock.Height, update trustedLightBlock, then
|
||||
// check options.Hash matches the trustedLightBlock's hash (if not, remove
|
||||
// trustedLightBlock and all the light blocks before)
|
||||
@@ -395,10 +395,10 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp
|
||||
// TrustedLightBlock returns a trusted light block at the given height (0 - the latest).
|
||||
//
|
||||
// It returns an error if:
|
||||
// - there are some issues with the trusted store, although that should not
|
||||
// happen normally;
|
||||
// - negative height is passed;
|
||||
// - header has not been verified yet and is therefore not in the store
|
||||
// - there are some issues with the trusted store, although that should not
|
||||
// happen normally;
|
||||
// - negative height is passed;
|
||||
// - header has not been verified yet and is therefore not in the store
|
||||
//
|
||||
// Safe for concurrent use by multiple goroutines.
|
||||
func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) {
|
||||
@@ -510,8 +510,9 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now
|
||||
//
|
||||
// If the header, which is older than the currently trusted header, is
|
||||
// requested and the light client does not have it, VerifyHeader will perform:
|
||||
// a) verifySkipping verification if nearest trusted header is found & not expired
|
||||
// b) backwards verification in all other cases
|
||||
//
|
||||
// a) verifySkipping verification if nearest trusted header is found & not expired
|
||||
// b) backwards verification in all other cases
|
||||
//
|
||||
// It returns ErrOldHeaderExpired if the latest trusted header expired.
|
||||
//
|
||||
@@ -980,12 +981,12 @@ func (c *Client) backwards(
|
||||
// lightBlockFromPrimary retrieves the lightBlock from the primary provider
|
||||
// at the specified height. This method also handles provider behavior as follows:
|
||||
//
|
||||
// 1. If the provider does not respond or does not have the block, it tries again
|
||||
// with a different provider
|
||||
// 2. If all providers return the same error, the light client forwards the error to
|
||||
// where the initial request came from
|
||||
// 3. If the provider provides an invalid light block, is deemed unreliable or returns
|
||||
// any other error, the primary is permanently dropped and is replaced by a witness.
|
||||
// 1. If the provider does not respond or does not have the block, it tries again
|
||||
// with a different provider
|
||||
// 2. If all providers return the same error, the light client forwards the error to
|
||||
// where the initial request came from
|
||||
// 3. If the provider provides an invalid light block, is deemed unreliable or returns
|
||||
// any other error, the primary is permanently dropped and is replaced by a witness.
|
||||
func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) {
|
||||
c.providerMutex.Lock()
|
||||
l, err := c.primary.LightBlock(ctx, height)
|
||||
|
||||
@@ -109,7 +109,9 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
|
||||
//
|
||||
// 1: errConflictingHeaders -> there may have been an attack on this light client
|
||||
// 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one
|
||||
// Note: In the case of an invalid header we remove the witness
|
||||
//
|
||||
// Note: In the case of an invalid header we remove the witness
|
||||
//
|
||||
// 3: nil -> the hashes of the two headers match
|
||||
func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader,
|
||||
witness provider.Provider, witnessIndex int) {
|
||||
@@ -275,16 +277,16 @@ func (c *Client) handleConflictingHeaders(
|
||||
// it has received from another and preforms verifySkipping at the heights of each of the intermediate
|
||||
// headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen.
|
||||
//
|
||||
// 1. The light client verifies a header that is different to the intermediate header in the trace. This
|
||||
// is the bifurcation point and the light client can create evidence from it
|
||||
// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we
|
||||
// return the error and remove the witness
|
||||
// 1. The light client verifies a header that is different to the intermediate header in the trace. This
|
||||
// is the bifurcation point and the light client can create evidence from it
|
||||
// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we
|
||||
// return the error and remove the witness
|
||||
//
|
||||
// CONTRACT:
|
||||
// 1. Trace can not be empty len(trace) > 0
|
||||
// 2. The last block in the trace can not be of a lower height than the target block
|
||||
// trace[len(trace)-1].Height >= targetBlock.Height
|
||||
// 3. The
|
||||
// 1. Trace can not be empty len(trace) > 0
|
||||
// 2. The last block in the trace can not be of a lower height than the target block
|
||||
// trace[len(trace)-1].Height >= targetBlock.Height
|
||||
// 3. The
|
||||
func (c *Client) examineConflictingHeaderAgainstTrace(
|
||||
ctx context.Context,
|
||||
trace []*types.LightBlock,
|
||||
|
||||
46
light/doc.go
46
light/doc.go
@@ -63,31 +63,31 @@ This package provides three major things:
|
||||
|
||||
Example usage:
|
||||
|
||||
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
c, err := NewHTTPClient(
|
||||
chainID,
|
||||
TrustOptions{
|
||||
Period: 504 * time.Hour, // 21 days
|
||||
Height: 100,
|
||||
Hash: header.Hash(),
|
||||
},
|
||||
"http://localhost:26657",
|
||||
[]string{"http://witness1:26657"},
|
||||
dbs.New(db, ""),
|
||||
)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
c, err := NewHTTPClient(
|
||||
chainID,
|
||||
TrustOptions{
|
||||
Period: 504 * time.Hour, // 21 days
|
||||
Height: 100,
|
||||
Hash: header.Hash(),
|
||||
},
|
||||
"http://localhost:26657",
|
||||
[]string{"http://witness1:26657"},
|
||||
dbs.New(db, ""),
|
||||
)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
h, err := c.TrustedHeader(100)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
fmt.Println("header", h)
|
||||
h, err := c.TrustedHeader(100)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
fmt.Println("header", h)
|
||||
|
||||
Check out other examples in example_test.go
|
||||
|
||||
|
||||
@@ -216,6 +216,6 @@ func validateHeight(height int64) (*int64, error) {
|
||||
// exponential backoff (with jitter)
|
||||
// 0.5s -> 2s -> 4.5s -> 8s -> 12.5 with 1s variation
|
||||
func backoffTimeout(attempt uint16) time.Duration {
|
||||
// nolint:gosec // G404: Use of weak random number generator
|
||||
//nolint:gosec // G404: Use of weak random number generator
|
||||
return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond
|
||||
}
|
||||
|
||||
@@ -25,8 +25,6 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
|
||||
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
|
||||
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
|
||||
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
|
||||
"header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"),
|
||||
"header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"),
|
||||
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
|
||||
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
|
||||
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
|
||||
@@ -64,7 +62,7 @@ func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {
|
||||
|
||||
type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)
|
||||
|
||||
// nolint: interfacer
|
||||
//nolint: interfacer
|
||||
func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
|
||||
return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
|
||||
return c.Status(ctx.Context())
|
||||
@@ -111,22 +109,6 @@ func makeBlockFunc(c *lrpc.Client) rpcBlockFunc {
|
||||
}
|
||||
}
|
||||
|
||||
type rpcHeaderFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error)
|
||||
|
||||
func makeHeaderFunc(c *lrpc.Client) rpcHeaderFunc {
|
||||
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error) {
|
||||
return c.Header(ctx.Context(), height)
|
||||
}
|
||||
}
|
||||
|
||||
type rpcHeaderByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error)
|
||||
|
||||
func makeHeaderByHashFunc(c *lrpc.Client) rpcHeaderByHashFunc {
|
||||
return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error) {
|
||||
return c.HeaderByHash(ctx.Context(), hash)
|
||||
}
|
||||
}
|
||||
|
||||
type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error)
|
||||
|
||||
func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc {
|
||||
@@ -296,7 +278,7 @@ func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {
|
||||
|
||||
type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
|
||||
|
||||
// nolint: interfacer
|
||||
//nolint: interfacer
|
||||
func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
|
||||
return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
|
||||
return c.BroadcastEvidence(ctx.Context(), ev)
|
||||
|
||||
@@ -440,40 +440,6 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Header fetches and verifies the header directly via the light client
|
||||
func (c *Client) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
|
||||
lb, err := c.updateLightClientIfNeededTo(ctx, height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ctypes.ResultHeader{Header: lb.Header}, nil
|
||||
}
|
||||
|
||||
// HeaderByHash calls rpcclient#HeaderByHash and updates the client if it's falling behind.
|
||||
func (c *Client) HeaderByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctypes.ResultHeader, error) {
|
||||
res, err := c.next.HeaderByHash(ctx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := res.Header.ValidateBasic(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lb, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !bytes.Equal(lb.Header.Hash(), res.Header.Hash()) {
|
||||
return nil, fmt.Errorf("primary header hash does not match trusted header hash. (%X != %X)",
|
||||
lb.Header.Hash(), res.Header.Hash())
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
|
||||
// Update the light client if we're behind and retrieve the light block at the requested height
|
||||
// or at the latest height if no height is provided.
|
||||
|
||||
@@ -19,13 +19,13 @@ var (
|
||||
// VerifyNonAdjacent verifies non-adjacent untrustedHeader against
|
||||
// trustedHeader. It ensures that:
|
||||
//
|
||||
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
|
||||
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
|
||||
// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals)
|
||||
// signed correctly (if not, ErrNewValSetCantBeTrusted is returned)
|
||||
// d) more than 2/3 of untrustedVals have signed h2
|
||||
// (otherwise, ErrInvalidHeader is returned)
|
||||
// e) headers are non-adjacent.
|
||||
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
|
||||
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
|
||||
// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals)
|
||||
// signed correctly (if not, ErrNewValSetCantBeTrusted is returned)
|
||||
// d) more than 2/3 of untrustedVals have signed h2
|
||||
// (otherwise, ErrInvalidHeader is returned)
|
||||
// e) headers are non-adjacent.
|
||||
//
|
||||
// maxClockDrift defines how much untrustedHeader.Time can drift into the
|
||||
// future.
|
||||
@@ -81,12 +81,12 @@ func VerifyNonAdjacent(
|
||||
// VerifyAdjacent verifies directly adjacent untrustedHeader against
|
||||
// trustedHeader. It ensures that:
|
||||
//
|
||||
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
|
||||
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
|
||||
// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash
|
||||
// d) more than 2/3 of new validators (untrustedVals) have signed h2
|
||||
// (otherwise, ErrInvalidHeader is returned)
|
||||
// e) headers are adjacent.
|
||||
// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned)
|
||||
// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned)
|
||||
// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash
|
||||
// d) more than 2/3 of new validators (untrustedVals) have signed h2
|
||||
// (otherwise, ErrInvalidHeader is returned)
|
||||
// e) headers are adjacent.
|
||||
//
|
||||
// maxClockDrift defines how much untrustedHeader.Time can drift into the
|
||||
// future.
|
||||
@@ -212,12 +212,12 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time
|
||||
// VerifyBackwards verifies an untrusted header with a height one less than
|
||||
// that of an adjacent trusted header. It ensures that:
|
||||
//
|
||||
// a) untrusted header is valid
|
||||
// b) untrusted header has a time before the trusted header
|
||||
// c) that the LastBlockID hash of the trusted header is the same as the hash
|
||||
// of the trusted header
|
||||
// a) untrusted header is valid
|
||||
// b) untrusted header has a time before the trusted header
|
||||
// c) that the LastBlockID hash of the trusted header is the same as the hash
|
||||
// of the trusted header
|
||||
//
|
||||
// For any of these cases ErrInvalidHeader is returned.
|
||||
// For any of these cases ErrInvalidHeader is returned.
|
||||
func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error {
|
||||
if err := untrustedHeader.ValidateBasic(); err != nil {
|
||||
return ErrInvalidHeader{err}
|
||||
|
||||
@@ -194,7 +194,9 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
|
||||
|
||||
// It blocks if we're waiting on Update() or Reap().
|
||||
// cb: A callback from the CheckTx command.
|
||||
// It gets called from another goroutine.
|
||||
//
|
||||
// It gets called from another goroutine.
|
||||
//
|
||||
// CONTRACT: Either cb will get called, or err returned.
|
||||
//
|
||||
// Safe for concurrent use by multiple goroutines.
|
||||
@@ -310,7 +312,7 @@ func (mem *CListMempool) reqResCb(
|
||||
}
|
||||
|
||||
// Called from:
|
||||
// - resCbFirstTime (lock not held) if tx is valid
|
||||
// - resCbFirstTime (lock not held) if tx is valid
|
||||
func (mem *CListMempool) addTx(memTx *mempoolTx) {
|
||||
e := mem.txs.PushBack(memTx)
|
||||
mem.txsMap.Store(memTx.tx.Key(), e)
|
||||
@@ -319,8 +321,8 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
|
||||
}
|
||||
|
||||
// Called from:
|
||||
// - Update (lock held) if tx was committed
|
||||
// - resCbRecheck (lock not held) if tx was invalidated
|
||||
// - Update (lock held) if tx was committed
|
||||
// - resCbRecheck (lock not held) if tx was invalidated
|
||||
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
|
||||
mem.txs.Remove(elem)
|
||||
elem.DetachPrev()
|
||||
|
||||
45
node/doc.go
45
node/doc.go
@@ -6,35 +6,34 @@ Adding new p2p.Reactor(s)
|
||||
|
||||
To add a new p2p.Reactor, use the CustomReactors option:
|
||||
|
||||
node, err := NewNode(
|
||||
config,
|
||||
privVal,
|
||||
nodeKey,
|
||||
clientCreator,
|
||||
genesisDocProvider,
|
||||
dbProvider,
|
||||
metricsProvider,
|
||||
logger,
|
||||
CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}),
|
||||
)
|
||||
node, err := NewNode(
|
||||
config,
|
||||
privVal,
|
||||
nodeKey,
|
||||
clientCreator,
|
||||
genesisDocProvider,
|
||||
dbProvider,
|
||||
metricsProvider,
|
||||
logger,
|
||||
CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}),
|
||||
)
|
||||
|
||||
Replacing existing p2p.Reactor(s)
|
||||
|
||||
To replace the built-in p2p.Reactor, use the CustomReactors option:
|
||||
|
||||
node, err := NewNode(
|
||||
config,
|
||||
privVal,
|
||||
nodeKey,
|
||||
clientCreator,
|
||||
genesisDocProvider,
|
||||
dbProvider,
|
||||
metricsProvider,
|
||||
logger,
|
||||
CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}),
|
||||
)
|
||||
node, err := NewNode(
|
||||
config,
|
||||
privVal,
|
||||
nodeKey,
|
||||
clientCreator,
|
||||
genesisDocProvider,
|
||||
dbProvider,
|
||||
metricsProvider,
|
||||
logger,
|
||||
CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}),
|
||||
)
|
||||
|
||||
The list of existing reactors can be found in CustomReactors documentation.
|
||||
|
||||
*/
|
||||
package node
|
||||
|
||||
84
node/node.go
84
node/node.go
@@ -16,7 +16,7 @@ import (
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
bc "github.com/tendermint/tendermint/blocksync"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cs "github.com/tendermint/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
@@ -51,7 +51,7 @@ import (
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
|
||||
_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
|
||||
_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
|
||||
|
||||
_ "github.com/lib/pq" // provide the psql db driver
|
||||
)
|
||||
@@ -133,10 +133,10 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
|
||||
// Option sets a parameter for the node.
|
||||
type Option func(*Node)
|
||||
|
||||
// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
|
||||
// Temporary interface for switching to block sync, we should get rid of v0 and v1 reactors.
|
||||
// See: https://github.com/tendermint/tendermint/issues/4595
|
||||
type fastSyncReactor interface {
|
||||
SwitchToFastSync(sm.State) error
|
||||
type blockSyncReactor interface {
|
||||
SwitchToBlockSync(sm.State) error
|
||||
}
|
||||
|
||||
// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
|
||||
@@ -145,12 +145,12 @@ type fastSyncReactor interface {
|
||||
// WARNING: using any name from the below list of the existing reactors will
|
||||
// result in replacing it with the custom one.
|
||||
//
|
||||
// - MEMPOOL
|
||||
// - BLOCKCHAIN
|
||||
// - CONSENSUS
|
||||
// - EVIDENCE
|
||||
// - PEX
|
||||
// - STATESYNC
|
||||
// - MEMPOOL
|
||||
// - BLOCKCHAIN
|
||||
// - CONSENSUS
|
||||
// - EVIDENCE
|
||||
// - PEX
|
||||
// - STATESYNC
|
||||
func CustomReactors(reactors map[string]p2p.Reactor) Option {
|
||||
return func(n *Node) {
|
||||
for name, reactor := range reactors {
|
||||
@@ -212,7 +212,7 @@ type Node struct {
|
||||
eventBus *types.EventBus // pub/sub for services
|
||||
stateStore sm.Store
|
||||
blockStore *store.BlockStore // store the blockchain to disk
|
||||
bcReactor p2p.Reactor // for fast-syncing
|
||||
bcReactor p2p.Reactor // for block-syncing
|
||||
mempoolReactor p2p.Reactor // for gossipping transactions
|
||||
mempool mempl.Mempool
|
||||
stateSync bool // whether the node should state sync on startup
|
||||
@@ -430,7 +430,9 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
|
||||
return nil, nil, err
|
||||
}
|
||||
evidenceLogger := logger.With("module", "evidence")
|
||||
evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
|
||||
evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
}), blockStore)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -443,16 +445,16 @@ func createBlockchainReactor(config *cfg.Config,
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
blockStore *store.BlockStore,
|
||||
fastSync bool,
|
||||
blockSync bool,
|
||||
logger log.Logger) (bcReactor p2p.Reactor, err error) {
|
||||
|
||||
switch config.FastSync.Version {
|
||||
switch config.BlockSync.Version {
|
||||
case "v0":
|
||||
bcReactor = bc.NewReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor = bc.NewReactor(state.Copy(), blockExec, blockStore, blockSync)
|
||||
case "v1", "v2":
|
||||
return nil, fmt.Errorf("fast sync version %s has been deprecated. Please use v0", config.FastSync.Version)
|
||||
return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
|
||||
return nil, fmt.Errorf("unknown fastsync version %s", config.BlockSync.Version)
|
||||
}
|
||||
|
||||
bcReactor.SetLogger(logger.With("module", "blockchain"))
|
||||
@@ -642,9 +644,9 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
|
||||
return pexReactor
|
||||
}
|
||||
|
||||
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
|
||||
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
|
||||
stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
|
||||
// startStateSync starts an asynchronous state sync process, then switches to block sync mode.
|
||||
func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor,
|
||||
stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, blockSync bool,
|
||||
stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
|
||||
ssR.Logger.Info("Starting state sync")
|
||||
|
||||
@@ -682,13 +684,13 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto
|
||||
return
|
||||
}
|
||||
|
||||
if fastSync {
|
||||
if blockSync {
|
||||
// FIXME Very ugly to have these metrics bleed through here.
|
||||
conR.Metrics.StateSyncing.Set(0)
|
||||
conR.Metrics.FastSyncing.Set(1)
|
||||
err = bcR.SwitchToFastSync(state)
|
||||
conR.Metrics.BlockSyncing.Set(1)
|
||||
err = bcR.SwitchToBlockSync(state)
|
||||
if err != nil {
|
||||
ssR.Logger.Error("Failed to switch to fast sync", "err", err)
|
||||
ssR.Logger.Error("Failed to switch to block sync", "err", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
@@ -714,7 +716,9 @@ func NewNode(config *cfg.Config,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
})
|
||||
|
||||
state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
|
||||
if err != nil {
|
||||
@@ -783,9 +787,9 @@ func NewNode(config *cfg.Config,
|
||||
}
|
||||
}
|
||||
|
||||
// Determine whether we should do fast sync. This must happen after the handshake, since the
|
||||
// Determine whether we should do block sync. This must happen after the handshake, since the
|
||||
// app may modify the validator set, specifying ourself as the only validator.
|
||||
fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
|
||||
blockSync := config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)
|
||||
|
||||
logNodeStartupInfo(state, pubKey, logger, consensusLogger)
|
||||
|
||||
@@ -808,26 +812,26 @@ func NewNode(config *cfg.Config,
|
||||
sm.BlockExecutorWithMetrics(smMetrics),
|
||||
)
|
||||
|
||||
// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
|
||||
bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
|
||||
// Make BlockchainReactor. Don't start block sync if we're doing a state sync first.
|
||||
bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
|
||||
}
|
||||
|
||||
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
|
||||
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
|
||||
// FIXME We need to update metrics here, since other reactors don't have access to them.
|
||||
if stateSync {
|
||||
csMetrics.StateSyncing.Set(1)
|
||||
} else if fastSync {
|
||||
csMetrics.FastSyncing.Set(1)
|
||||
} else if blockSync {
|
||||
csMetrics.BlockSyncing.Set(1)
|
||||
}
|
||||
consensusReactor, consensusState := createConsensusReactor(
|
||||
config, state, blockExec, blockStore, mempool, evidencePool,
|
||||
privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger,
|
||||
privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger,
|
||||
)
|
||||
|
||||
// Set up state sync reactor, and schedule a sync if requested.
|
||||
// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
|
||||
// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
|
||||
// we should clean this whole thing up. See:
|
||||
// https://github.com/tendermint/tendermint/issues/4644
|
||||
stateSyncReactor := statesync.NewReactor(
|
||||
@@ -982,12 +986,12 @@ func (n *Node) OnStart() error {
|
||||
|
||||
// Run state sync
|
||||
if n.stateSync {
|
||||
bcR, ok := n.bcReactor.(fastSyncReactor)
|
||||
bcR, ok := n.bcReactor.(blockSyncReactor)
|
||||
if !ok {
|
||||
return fmt.Errorf("this blockchain reactor does not support switching from state sync")
|
||||
}
|
||||
err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
|
||||
n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
|
||||
n.config.StateSync, n.config.BlockSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start state sync: %w", err)
|
||||
}
|
||||
@@ -1335,7 +1339,7 @@ func makeNodeInfo(
|
||||
Network: genDoc.ChainID,
|
||||
Version: version.TMCoreSemVer,
|
||||
Channels: []byte{
|
||||
bc.BlockchainChannel,
|
||||
bc.BlocksyncChannel,
|
||||
cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
|
||||
mempl.MempoolChannel,
|
||||
evidence.EvidenceChannel,
|
||||
@@ -1390,7 +1394,9 @@ func LoadStateFromDBOrGenesisDocProvider(
|
||||
return sm.State{}, nil, err
|
||||
}
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
return sm.State{}, nil, err
|
||||
|
||||
@@ -235,7 +235,9 @@ func TestCreateProposalBlock(t *testing.T) {
|
||||
|
||||
var height int64 = 1
|
||||
state, stateDB, privVals := state(1, height)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
maxBytes := 16384
|
||||
var partSize uint32 = 256
|
||||
maxEvidenceBytes := int64(maxBytes / 2)
|
||||
@@ -340,7 +342,9 @@ func TestMaxProposalBlockSize(t *testing.T) {
|
||||
|
||||
var height int64 = 1
|
||||
state, stateDB, _ := state(1, height)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
var maxBytes int64 = 16384
|
||||
var partSize uint32 = 256
|
||||
state.ConsensusParams.Block.MaxBytes = maxBytes
|
||||
@@ -464,7 +468,9 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
|
||||
|
||||
// save validators to db for 2 heights
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
if err := stateStore.Save(s); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -10,9 +10,13 @@ import (
|
||||
|
||||
// Only Go1.10 has a proper net.Conn implementation that
|
||||
// has the SetDeadline method implemented as per
|
||||
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
|
||||
//
|
||||
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
|
||||
//
|
||||
// lest we run into problems like
|
||||
// https://github.com/tendermint/tendermint/issues/851
|
||||
//
|
||||
// https://github.com/tendermint/tendermint/issues/851
|
||||
//
|
||||
// so for go versions < Go1.10 use our custom net.Conn creator
|
||||
// that doesn't return an `Unimplemented error` for net.Conn.
|
||||
// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04
|
||||
|
||||
@@ -62,6 +62,7 @@ The byte id and the relative priorities of each `Channel` are configured upon
|
||||
initialization of the connection.
|
||||
|
||||
There are two methods for sending messages:
|
||||
|
||||
func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
|
||||
func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {}
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ func TestMConnectionSendFlushStop(t *testing.T) {
|
||||
clientConn := createTestMConnection(client)
|
||||
err := clientConn.Start()
|
||||
require.Nil(t, err)
|
||||
defer clientConn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer clientConn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("abc")
|
||||
assert.True(t, clientConn.Send(0x01, msg))
|
||||
@@ -89,7 +89,7 @@ func TestMConnectionSend(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Ant-Man")
|
||||
assert.True(t, mconn.Send(0x01, msg))
|
||||
@@ -128,12 +128,12 @@ func TestMConnectionReceive(t *testing.T) {
|
||||
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn1.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn1.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn1.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
mconn2 := createTestMConnection(server)
|
||||
err = mconn2.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn2.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn2.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Cyclops")
|
||||
assert.True(t, mconn2.Send(0x01, msg))
|
||||
@@ -156,7 +156,7 @@ func TestMConnectionStatus(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
status := mconn.Status()
|
||||
assert.NotNil(t, status)
|
||||
@@ -179,7 +179,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
serverGotPing := make(chan struct{})
|
||||
go func() {
|
||||
@@ -218,7 +218,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// sending 3 pongs in a row (abuse)
|
||||
protoWriter := protoio.NewDelimitedWriter(server)
|
||||
@@ -273,7 +273,7 @@ func TestMConnectionMultiplePings(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// sending 3 pings in a row (abuse)
|
||||
// see https://github.com/tendermint/tendermint/issues/1190
|
||||
@@ -322,7 +322,7 @@ func TestMConnectionPingPongs(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
serverGotPing := make(chan struct{})
|
||||
go func() {
|
||||
@@ -380,7 +380,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
|
||||
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
if err := client.Close(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -492,8 +492,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
|
||||
chOnRcv := make(chan struct{})
|
||||
|
||||
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
|
||||
defer mconnClient.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnClient.Stop() //nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
mconnServer.onReceive = func(chID byte, msgBytes []byte) {
|
||||
chOnRcv <- struct{}{}
|
||||
@@ -528,8 +528,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
|
||||
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
|
||||
chOnErr := make(chan struct{})
|
||||
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
|
||||
defer mconnClient.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconnClient.Stop() //nolint:errcheck // ignore for tests
|
||||
defer mconnServer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// send msg with unknown msg type
|
||||
_, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"})
|
||||
@@ -545,7 +545,7 @@ func TestMConnectionTrySend(t *testing.T) {
|
||||
mconn := createTestMConnection(client)
|
||||
err := mconn.Start()
|
||||
require.Nil(t, err)
|
||||
defer mconn.Stop() // nolint:errcheck // ignore for tests
|
||||
defer mconn.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
msg := []byte("Semicolon-Woman")
|
||||
resultCh := make(chan string, 2)
|
||||
@@ -564,7 +564,7 @@ func TestMConnectionTrySend(t *testing.T) {
|
||||
assert.Equal(t, "TrySend", <-resultCh)
|
||||
}
|
||||
|
||||
// nolint:lll //ignore line length for tests
|
||||
//nolint:lll //ignore line length for tests
|
||||
func TestConnVectors(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
|
||||
@@ -103,7 +103,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
|
||||
|
||||
func (fc *FuzzedConnection) randomDuration() time.Duration {
|
||||
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
|
||||
return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) // nolint: gas
|
||||
return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) //nolint: gas
|
||||
}
|
||||
|
||||
// implements the fuzz (delay, kill conn)
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestNodeInfoValidate(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
"Too Many Channels",
|
||||
func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, // nolint: gocritic
|
||||
func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, //nolint: gocritic
|
||||
true,
|
||||
},
|
||||
{"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true},
|
||||
|
||||
@@ -94,17 +94,16 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
|
||||
}
|
||||
|
||||
/*
|
||||
An address is bad if the address in question is a New address, has not been tried in the last
|
||||
minute, and meets one of the following criteria:
|
||||
An address is bad if the address in question is a New address, has not been tried in the last
|
||||
minute, and meets one of the following criteria:
|
||||
|
||||
1) It claims to be from the future
|
||||
2) It hasn't been seen in over a week
|
||||
3) It has failed at least three times and never succeeded
|
||||
4) It has failed ten times in the last week
|
||||
|
||||
All addresses that meet these criteria are assumed to be worthless and not
|
||||
worth keeping hold of.
|
||||
1) It claims to be from the future
|
||||
2) It hasn't been seen in over a week
|
||||
3) It has failed at least three times and never succeeded
|
||||
4) It has failed ten times in the last week
|
||||
|
||||
All addresses that meet these criteria are assumed to be worthless and not
|
||||
worth keeping hold of.
|
||||
*/
|
||||
func (ka *knownAddress) isBad() bool {
|
||||
// Is Old --> good
|
||||
|
||||
@@ -58,15 +58,15 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
|
||||
}
|
||||
|
||||
// --- FAIL: TestPEXReactorRunning (11.10s)
|
||||
// pex_reactor_test.go:411: expected all switches to be connected to at
|
||||
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
|
||||
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
|
||||
//
|
||||
// pex_reactor_test.go:411: expected all switches to be connected to at
|
||||
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
|
||||
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
|
||||
//
|
||||
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
|
||||
// if any peer (who we already connected to) has the same IP. Even though local
|
||||
// peers have different IP addresses, they all have the same underlying remote
|
||||
// IP: 127.0.0.1.
|
||||
//
|
||||
func TestPEXReactorRunning(t *testing.T) {
|
||||
N := 3
|
||||
switches := make([]*p2p.Switch, N)
|
||||
@@ -214,7 +214,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
// 1. test creating peer with no seeds works
|
||||
peerSwitch := testCreateDefaultPeer(dir, 0)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. create seed
|
||||
seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{})
|
||||
@@ -222,7 +222,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
// 3. test create peer with online seed works
|
||||
peerSwitch = testCreatePeerWithSeed(dir, 2, seed)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 4. test create peer with all seeds having unresolvable DNS fails
|
||||
badPeerConfig := &ReactorConfig{
|
||||
@@ -231,7 +231,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Error(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 5. test create peer with one good seed address succeeds
|
||||
badPeerConfig = &ReactorConfig{
|
||||
@@ -241,7 +241,7 @@ func TestCheckSeeds(t *testing.T) {
|
||||
}
|
||||
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
}
|
||||
|
||||
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
@@ -253,12 +253,12 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
// 1. create seed
|
||||
seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{})
|
||||
require.Nil(t, seed.Start())
|
||||
defer seed.Stop() // nolint:errcheck // ignore for tests
|
||||
defer seed.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. create usual peer with only seed configured.
|
||||
peer := testCreatePeerWithSeed(dir, 1, seed)
|
||||
require.Nil(t, peer.Start())
|
||||
defer peer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 3. check that the peer connects to seed immediately
|
||||
assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1)
|
||||
@@ -273,18 +273,18 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
|
||||
// 1. create peer
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.Nil(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 2. Create seed which knows about the peer
|
||||
peerAddr := peerSwitch.NetAddress()
|
||||
seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr})
|
||||
require.Nil(t, seed.Start())
|
||||
defer seed.Stop() // nolint:errcheck // ignore for tests
|
||||
defer seed.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 3. create another peer with only seed configured.
|
||||
secondPeer := testCreatePeerWithSeed(dir, 3, seed)
|
||||
require.Nil(t, secondPeer.Start())
|
||||
defer secondPeer.Stop() // nolint:errcheck // ignore for tests
|
||||
defer secondPeer.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 4. check that the second peer connects to seed immediately
|
||||
assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1)
|
||||
@@ -307,13 +307,13 @@ func TestPEXReactorSeedMode(t *testing.T) {
|
||||
sw.SetAddrBook(book)
|
||||
err = sw.Start()
|
||||
require.NoError(t, err)
|
||||
defer sw.Stop() // nolint:errcheck // ignore for tests
|
||||
defer sw.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
assert.Zero(t, sw.Peers().Size())
|
||||
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.NoError(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
// 1. Test crawlPeers dials the peer
|
||||
pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()})
|
||||
@@ -346,13 +346,13 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
|
||||
sw.SetAddrBook(book)
|
||||
err = sw.Start()
|
||||
require.NoError(t, err)
|
||||
defer sw.Stop() // nolint:errcheck // ignore for tests
|
||||
defer sw.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
assert.Zero(t, sw.Peers().Size())
|
||||
|
||||
peerSwitch := testCreateDefaultPeer(dir, 1)
|
||||
require.NoError(t, peerSwitch.Start())
|
||||
defer peerSwitch.Stop() // nolint:errcheck // ignore for tests
|
||||
defer peerSwitch.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})
|
||||
require.NoError(t, err)
|
||||
@@ -618,7 +618,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
for j := 0; j < len(knownAddrs); j++ {
|
||||
book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests
|
||||
book.AddAddress(knownAddrs[j], srcAddrs[j]) //nolint:errcheck // ignore for tests
|
||||
book.MarkGood(knownAddrs[j].ID)
|
||||
}
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
@@ -379,8 +379,8 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
|
||||
// to the PEX/Addrbook to find the peer with the addr again
|
||||
// NOTE: this will keep trying even if the handshake or auth fails.
|
||||
// TODO: be more explicit with error types so we only retry on certain failures
|
||||
// - ie. if we're getting ErrDuplicatePeer we can stop
|
||||
// because the addrbook got us the peer back already
|
||||
// - ie. if we're getting ErrDuplicatePeer we can stop
|
||||
// because the addrbook got us the peer back already
|
||||
func (sw *Switch) reconnectToPeer(addr *NetAddress) {
|
||||
if sw.reconnecting.Has(string(addr.ID)) {
|
||||
return
|
||||
|
||||
@@ -529,8 +529,8 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err := mt.Accept(peerConfig{})
|
||||
if err, ok := err.(ErrRejected); ok {
|
||||
if !err.IsSelf() {
|
||||
if e, ok := err.(ErrRejected); ok {
|
||||
if !e.IsSelf() {
|
||||
t.Errorf("expected to reject self, got: %v", err)
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -72,6 +72,7 @@ func TestTrustMetricCopyNilPointer(t *testing.T) {
|
||||
}
|
||||
|
||||
// XXX: This test fails non-deterministically
|
||||
//
|
||||
//nolint:unused,deadcode
|
||||
func _TestTrustMetricStopPause(t *testing.T) {
|
||||
// The TestTicker will provide manual control over
|
||||
|
||||
@@ -202,7 +202,7 @@ func localIPv4() (net.IP, error) {
|
||||
}
|
||||
|
||||
func getServiceURL(rootURL string) (url, urnDomain string, err error) {
|
||||
r, err := http.Get(rootURL) // nolint: gosec
|
||||
r, err := http.Get(rootURL) //nolint: gosec
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
/*
|
||||
|
||||
Package privval provides different implementations of the types.PrivValidator.
|
||||
|
||||
FilePV
|
||||
# FilePV
|
||||
|
||||
FilePV is the simplest implementation and developer default.
|
||||
It uses one file for the private key and another to store state.
|
||||
|
||||
SignerListenerEndpoint
|
||||
# SignerListenerEndpoint
|
||||
|
||||
SignerListenerEndpoint establishes a connection to an external process,
|
||||
like a Key Management Server (KMS), using a socket.
|
||||
@@ -15,15 +14,14 @@ SignerListenerEndpoint listens for the external KMS process to dial in.
|
||||
SignerListenerEndpoint takes a listener, which determines the type of connection
|
||||
(ie. encrypted over tcp, or unencrypted over unix).
|
||||
|
||||
SignerDialerEndpoint
|
||||
# SignerDialerEndpoint
|
||||
|
||||
SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal.
|
||||
|
||||
SignerClient
|
||||
# SignerClient
|
||||
|
||||
SignerClient handles remote validator connections that provide signing services.
|
||||
In production, it's recommended to wrap it with RetrySignerClient to avoid
|
||||
termination in case of temporary errors.
|
||||
|
||||
*/
|
||||
package privval
|
||||
|
||||
@@ -57,7 +57,6 @@ func exampleProposal() *types.Proposal {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll // ignore line length for tests
|
||||
func TestPrivvalVectors(t *testing.T) {
|
||||
pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey()
|
||||
ppk, err := cryptoenc.PubKeyToProto(pk)
|
||||
|
||||
@@ -75,10 +75,10 @@ message RequestQuery {
|
||||
}
|
||||
|
||||
message RequestBeginBlock {
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
enum CheckTxType {
|
||||
@@ -234,7 +234,7 @@ message ResponseDeliverTx {
|
||||
}
|
||||
|
||||
message ResponseEndBlock {
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
ConsensusParams consensus_param_updates = 2;
|
||||
repeated Event events = 3
|
||||
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: tendermint/blockchain/types.proto
|
||||
// source: tendermint/blocksync/types.proto
|
||||
|
||||
package blockchain
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
@@ -32,7 +32,7 @@ func (m *BlockRequest) Reset() { *m = BlockRequest{} }
|
||||
func (m *BlockRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockRequest) ProtoMessage() {}
|
||||
func (*BlockRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{0}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{0}
|
||||
}
|
||||
func (m *BlockRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -77,7 +77,7 @@ func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} }
|
||||
func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*NoBlockResponse) ProtoMessage() {}
|
||||
func (*NoBlockResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{1}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{1}
|
||||
}
|
||||
func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -122,7 +122,7 @@ func (m *BlockResponse) Reset() { *m = BlockResponse{} }
|
||||
func (m *BlockResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockResponse) ProtoMessage() {}
|
||||
func (*BlockResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{2}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{2}
|
||||
}
|
||||
func (m *BlockResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -166,7 +166,7 @@ func (m *StatusRequest) Reset() { *m = StatusRequest{} }
|
||||
func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatusRequest) ProtoMessage() {}
|
||||
func (*StatusRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{3}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{3}
|
||||
}
|
||||
func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -205,7 +205,7 @@ func (m *StatusResponse) Reset() { *m = StatusResponse{} }
|
||||
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatusResponse) ProtoMessage() {}
|
||||
func (*StatusResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{4}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{4}
|
||||
}
|
||||
func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -262,7 +262,7 @@ func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2927480384e78499, []int{5}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{5}
|
||||
}
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -373,42 +373,41 @@ func (*Message) XXX_OneofWrappers() []interface{} {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*BlockRequest)(nil), "tendermint.blockchain.BlockRequest")
|
||||
proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blockchain.NoBlockResponse")
|
||||
proto.RegisterType((*BlockResponse)(nil), "tendermint.blockchain.BlockResponse")
|
||||
proto.RegisterType((*StatusRequest)(nil), "tendermint.blockchain.StatusRequest")
|
||||
proto.RegisterType((*StatusResponse)(nil), "tendermint.blockchain.StatusResponse")
|
||||
proto.RegisterType((*Message)(nil), "tendermint.blockchain.Message")
|
||||
proto.RegisterType((*BlockRequest)(nil), "tendermint.blocksync.BlockRequest")
|
||||
proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blocksync.NoBlockResponse")
|
||||
proto.RegisterType((*BlockResponse)(nil), "tendermint.blocksync.BlockResponse")
|
||||
proto.RegisterType((*StatusRequest)(nil), "tendermint.blocksync.StatusRequest")
|
||||
proto.RegisterType((*StatusResponse)(nil), "tendermint.blocksync.StatusResponse")
|
||||
proto.RegisterType((*Message)(nil), "tendermint.blocksync.Message")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("tendermint/blockchain/types.proto", fileDescriptor_2927480384e78499) }
|
||||
func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) }
|
||||
|
||||
var fileDescriptor_2927480384e78499 = []byte{
|
||||
// 370 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xc1, 0x4e, 0xfa, 0x40,
|
||||
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x9b, 0xa8, 0xc4, 0x98, 0x46, 0xab, 0x12,
|
||||
0x3d, 0xd8, 0x26, 0x78, 0x25, 0x1e, 0x38, 0x11, 0x13, 0x8c, 0xa9, 0xc6, 0x83, 0x17, 0xd2, 0xe2,
|
||||
0x86, 0x36, 0x4a, 0x17, 0xd9, 0xed, 0xc1, 0xb7, 0xf0, 0x19, 0x7c, 0x1a, 0x8f, 0x1c, 0x3d, 0x1a,
|
||||
0x78, 0x11, 0xc3, 0x6c, 0x29, 0x4b, 0x03, 0xf5, 0xb6, 0x3b, 0xfd, 0xe6, 0x37, 0xdf, 0x7e, 0x99,
|
||||
0xc2, 0x31, 0x27, 0xf1, 0x33, 0x99, 0x8c, 0xa2, 0x98, 0xbb, 0xc1, 0x2b, 0x1d, 0xbc, 0x0c, 0x42,
|
||||
0x3f, 0x8a, 0x5d, 0xfe, 0x3e, 0x26, 0xcc, 0x19, 0x4f, 0x28, 0xa7, 0xe6, 0xee, 0x4a, 0xe2, 0xac,
|
||||
0x24, 0x07, 0x87, 0x52, 0x27, 0xca, 0x45, 0xbf, 0x68, 0xb2, 0x9b, 0x50, 0xeb, 0x2c, 0xae, 0x1e,
|
||||
0x79, 0x4b, 0x08, 0xe3, 0xe6, 0x1e, 0x54, 0x42, 0x12, 0x0d, 0x43, 0xde, 0x50, 0x8f, 0xd4, 0x73,
|
||||
0xcd, 0x4b, 0x6f, 0xf6, 0x05, 0x18, 0xb7, 0x34, 0x55, 0xb2, 0x31, 0x8d, 0x19, 0xd9, 0x2a, 0xbd,
|
||||
0x06, 0x7d, 0x5d, 0x78, 0x09, 0x65, 0x1c, 0x89, 0xba, 0x6a, 0x6b, 0xdf, 0x91, 0x8c, 0x8a, 0x07,
|
||||
0x08, 0xbd, 0x50, 0xd9, 0x06, 0xe8, 0xf7, 0xdc, 0xe7, 0x09, 0x4b, 0x3d, 0xd9, 0x6d, 0xa8, 0x2f,
|
||||
0x0b, 0xc5, 0xa3, 0x4d, 0x13, 0x4a, 0x81, 0xcf, 0x48, 0xe3, 0x1f, 0x56, 0xf1, 0x6c, 0x7f, 0x6a,
|
||||
0xf0, 0xbf, 0x47, 0x18, 0xf3, 0x87, 0xc4, 0xbc, 0x01, 0x1d, 0x67, 0xf4, 0x27, 0x02, 0x9d, 0x3a,
|
||||
0x3a, 0x71, 0x36, 0x46, 0xe7, 0xc8, 0xc9, 0x74, 0x15, 0xaf, 0x16, 0xc8, 0x49, 0x3d, 0xc0, 0x4e,
|
||||
0x4c, 0xfb, 0x4b, 0x9c, 0x30, 0x86, 0x83, 0xab, 0xad, 0xe6, 0x16, 0x5e, 0x2e, 0xc1, 0xae, 0xe2,
|
||||
0x19, 0x71, 0x2e, 0xd4, 0x1e, 0xd4, 0x73, 0x48, 0x0d, 0x91, 0xa7, 0xc5, 0x16, 0x33, 0xa0, 0x1e,
|
||||
0xe4, 0x71, 0x0c, 0xa3, 0xcb, 0x5e, 0x5c, 0x2a, 0xc4, 0xad, 0x05, 0xbf, 0xc0, 0x31, 0xb9, 0x60,
|
||||
0xde, 0x81, 0x91, 0xe1, 0x52, 0x7b, 0x65, 0xe4, 0x9d, 0xfd, 0xc1, 0xcb, 0xfc, 0xd5, 0xd9, 0x5a,
|
||||
0xa5, 0x53, 0x06, 0x8d, 0x25, 0xa3, 0xce, 0xe3, 0xd7, 0xcc, 0x52, 0xa7, 0x33, 0x4b, 0xfd, 0x99,
|
||||
0x59, 0xea, 0xc7, 0xdc, 0x52, 0xa6, 0x73, 0x4b, 0xf9, 0x9e, 0x5b, 0xca, 0x53, 0x7b, 0x18, 0xf1,
|
||||
0x30, 0x09, 0x9c, 0x01, 0x1d, 0xb9, 0xf2, 0x26, 0xaf, 0x8e, 0xb8, 0xc8, 0xee, 0xc6, 0xff, 0x23,
|
||||
0xa8, 0xe0, 0xc7, 0xab, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x59, 0x07, 0xbd, 0x3f, 0x03,
|
||||
0x00, 0x00,
|
||||
var fileDescriptor_19b397c236e0fa07 = []byte{
|
||||
// 368 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40,
|
||||
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97,
|
||||
0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14,
|
||||
0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3,
|
||||
0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64,
|
||||
0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7,
|
||||
0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a,
|
||||
0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec,
|
||||
0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19,
|
||||
0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06,
|
||||
0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b,
|
||||
0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a,
|
||||
0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0,
|
||||
0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72,
|
||||
0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78,
|
||||
0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56,
|
||||
0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3,
|
||||
0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f,
|
||||
0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd,
|
||||
0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8,
|
||||
0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4,
|
||||
0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12,
|
||||
0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *BlockRequest) Marshal() (dAtA []byte, err error) {
|
||||
@@ -1,7 +1,7 @@
|
||||
syntax = "proto3";
|
||||
package tendermint.blockchain;
|
||||
package tendermint.blocksync;
|
||||
|
||||
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blockchain";
|
||||
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync";
|
||||
|
||||
import "tendermint/types/block.proto";
|
||||
|
||||
@@ -104,7 +104,7 @@ func (m *NewRoundStep) GetLastCommitRound() int32 {
|
||||
}
|
||||
|
||||
// NewValidBlock is sent when a validator observes a valid block B in some round r,
|
||||
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
|
||||
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
|
||||
// In case the block is also committed, then IsCommit flag is set to true.
|
||||
type NewValidBlock struct {
|
||||
Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
|
||||
|
||||
@@ -18,7 +18,7 @@ message NewRoundStep {
|
||||
}
|
||||
|
||||
// NewValidBlock is sent when a validator observes a valid block B in some round r,
|
||||
//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
|
||||
// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
|
||||
// In case the block is also committed, then IsCommit flag is set to true.
|
||||
message NewValidBlock {
|
||||
int64 height = 1;
|
||||
|
||||
@@ -199,6 +199,58 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type ABCIResponsesInfo struct {
|
||||
AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"`
|
||||
Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} }
|
||||
func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ABCIResponsesInfo) ProtoMessage() {}
|
||||
func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{3}
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ABCIResponsesInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ABCIResponsesInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses {
|
||||
if m != nil {
|
||||
return m.AbciResponses
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) GetHeight() int64 {
|
||||
if m != nil {
|
||||
return m.Height
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Version struct {
|
||||
Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"`
|
||||
Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"`
|
||||
@@ -208,7 +260,7 @@ func (m *Version) Reset() { *m = Version{} }
|
||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||
func (*Version) ProtoMessage() {}
|
||||
func (*Version) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{3}
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{4}
|
||||
}
|
||||
func (m *Version) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -284,7 +336,7 @@ func (m *State) Reset() { *m = State{} }
|
||||
func (m *State) String() string { return proto.CompactTextString(m) }
|
||||
func (*State) ProtoMessage() {}
|
||||
func (*State) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{4}
|
||||
return fileDescriptor_ccfacf933f22bf93, []int{5}
|
||||
}
|
||||
func (m *State) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -415,6 +467,7 @@ func init() {
|
||||
proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses")
|
||||
proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo")
|
||||
proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo")
|
||||
proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo")
|
||||
proto.RegisterType((*Version)(nil), "tendermint.state.Version")
|
||||
proto.RegisterType((*State)(nil), "tendermint.state.State")
|
||||
}
|
||||
@@ -422,55 +475,58 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) }
|
||||
|
||||
var fileDescriptor_ccfacf933f22bf93 = []byte{
|
||||
// 763 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34,
|
||||
0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03,
|
||||
0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff,
|
||||
0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10,
|
||||
0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c,
|
||||
0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08,
|
||||
0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d,
|
||||
0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84,
|
||||
0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9,
|
||||
0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70,
|
||||
0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43,
|
||||
0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0,
|
||||
0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5,
|
||||
0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80,
|
||||
0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd,
|
||||
0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb,
|
||||
0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba,
|
||||
0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89,
|
||||
0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6,
|
||||
0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e,
|
||||
0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f,
|
||||
0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c,
|
||||
0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69,
|
||||
0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41,
|
||||
0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8,
|
||||
0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7,
|
||||
0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6,
|
||||
0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71,
|
||||
0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07,
|
||||
0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89,
|
||||
0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc,
|
||||
0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56,
|
||||
0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9,
|
||||
0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9,
|
||||
0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35,
|
||||
0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57,
|
||||
0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83,
|
||||
0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd,
|
||||
0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f,
|
||||
0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72,
|
||||
0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8,
|
||||
0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07,
|
||||
0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab,
|
||||
0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e,
|
||||
0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4,
|
||||
0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00,
|
||||
0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00,
|
||||
// 805 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44,
|
||||
0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1,
|
||||
0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c,
|
||||
0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0,
|
||||
0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d,
|
||||
0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46,
|
||||
0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93,
|
||||
0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c,
|
||||
0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8,
|
||||
0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88,
|
||||
0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41,
|
||||
0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57,
|
||||
0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84,
|
||||
0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62,
|
||||
0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82,
|
||||
0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0,
|
||||
0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42,
|
||||
0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac,
|
||||
0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29,
|
||||
0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65,
|
||||
0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2,
|
||||
0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85,
|
||||
0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd,
|
||||
0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e,
|
||||
0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9,
|
||||
0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6,
|
||||
0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 0x21, 0x75, 0xa8, 0xfc,
|
||||
0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85,
|
||||
0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd,
|
||||
0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8,
|
||||
0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7,
|
||||
0xd0, 0x5a, 0x44, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2,
|
||||
0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b,
|
||||
0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9,
|
||||
0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0,
|
||||
0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30,
|
||||
0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f,
|
||||
0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b,
|
||||
0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12,
|
||||
0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8,
|
||||
0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3,
|
||||
0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2,
|
||||
0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1,
|
||||
0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42,
|
||||
0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93,
|
||||
0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a,
|
||||
0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf,
|
||||
0x5b, 0xdb, 0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57,
|
||||
0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba,
|
||||
0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9,
|
||||
0x2e, 0xa2, 0x07, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *ABCIResponses) Marshal() (dAtA []byte, err error) {
|
||||
@@ -612,6 +668,46 @@ func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Height != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.AbciResponses != nil {
|
||||
{
|
||||
size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Version) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@@ -747,12 +843,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i--
|
||||
dAtA[i] = 0x32
|
||||
}
|
||||
n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
|
||||
if err10 != nil {
|
||||
return 0, err10
|
||||
n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
|
||||
if err11 != nil {
|
||||
return 0, err11
|
||||
}
|
||||
i -= n10
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n10))
|
||||
i -= n11
|
||||
i = encodeVarintTypes(dAtA, i, uint64(n11))
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
{
|
||||
@@ -854,6 +950,22 @@ func (m *ConsensusParamsInfo) Size() (n int) {
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ABCIResponsesInfo) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.AbciResponses != nil {
|
||||
l = m.AbciResponses.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Height != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Height))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Version) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
@@ -1291,6 +1403,111 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.AbciResponses == nil {
|
||||
m.AbciResponses = &ABCIResponses{}
|
||||
}
|
||||
if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
|
||||
}
|
||||
m.Height = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Height |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Version) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
||||
@@ -32,6 +32,11 @@ message ConsensusParamsInfo {
|
||||
int64 last_height_changed = 2;
|
||||
}
|
||||
|
||||
message ABCIResponsesInfo {
|
||||
ABCIResponses abci_responses = 1;
|
||||
int64 height = 2;
|
||||
}
|
||||
|
||||
message Version {
|
||||
tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false];
|
||||
string software = 2;
|
||||
|
||||
@@ -17,20 +17,20 @@ message Evidence {
|
||||
|
||||
// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes.
|
||||
message DuplicateVoteEvidence {
|
||||
tendermint.types.Vote vote_a = 1;
|
||||
tendermint.types.Vote vote_b = 2;
|
||||
int64 total_voting_power = 3;
|
||||
int64 validator_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
tendermint.types.Vote vote_a = 1;
|
||||
tendermint.types.Vote vote_b = 2;
|
||||
int64 total_voting_power = 3;
|
||||
int64 validator_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
}
|
||||
|
||||
// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
|
||||
message LightClientAttackEvidence {
|
||||
tendermint.types.LightBlock conflicting_block = 1;
|
||||
int64 common_height = 2;
|
||||
tendermint.types.LightBlock conflicting_block = 1;
|
||||
int64 common_height = 2;
|
||||
repeated tendermint.types.Validator byzantine_validators = 3;
|
||||
int64 total_voting_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
int64 total_voting_power = 4;
|
||||
google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
}
|
||||
|
||||
message EvidenceList {
|
||||
|
||||
@@ -106,10 +106,10 @@ message Vote {
|
||||
|
||||
// Commit contains the evidence that a block was committed by a set of validators.
|
||||
message Commit {
|
||||
int64 height = 1;
|
||||
int32 round = 2;
|
||||
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
|
||||
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
|
||||
int64 height = 1;
|
||||
int32 round = 2;
|
||||
BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
|
||||
repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// CommitSig is a part of the Vote included in a Commit.
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestAppConns_Failure(t *testing.T) {
|
||||
}()
|
||||
|
||||
quitCh := make(chan struct{})
|
||||
var recvQuitCh <-chan struct{} // nolint:gosimple
|
||||
var recvQuitCh <-chan struct{} //nolint:gosimple
|
||||
recvQuitCh = quitCh
|
||||
|
||||
clientCreatorMock := &mocks.ClientCreator{}
|
||||
|
||||
@@ -39,24 +39,24 @@ the example for more details.
|
||||
|
||||
Example:
|
||||
|
||||
c, err := New("http://192.168.1.10:26657", "/websocket")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
c, err := New("http://192.168.1.10:26657", "/websocket")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// call Start/Stop if you're subscribing to events
|
||||
err = c.Start()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
defer c.Stop()
|
||||
// call Start/Stop if you're subscribing to events
|
||||
err = c.Start()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
defer c.Stop()
|
||||
|
||||
res, err := c.Status()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
res, err := c.Status()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// handle result
|
||||
// handle result
|
||||
*/
|
||||
type HTTP struct {
|
||||
remote string
|
||||
@@ -98,11 +98,9 @@ type baseRPCClient struct {
|
||||
caller jsonrpcclient.Caller
|
||||
}
|
||||
|
||||
var (
|
||||
_ rpcClient = (*HTTP)(nil)
|
||||
_ rpcClient = (*BatchHTTP)(nil)
|
||||
_ rpcClient = (*baseRPCClient)(nil)
|
||||
)
|
||||
var _ rpcClient = (*HTTP)(nil)
|
||||
var _ rpcClient = (*BatchHTTP)(nil)
|
||||
var _ rpcClient = (*baseRPCClient)(nil)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// HTTP
|
||||
@@ -446,31 +444,6 @@ func (c *baseRPCClient) BlockResults(
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
|
||||
result := new(ctypes.ResultHeader)
|
||||
params := make(map[string]interface{})
|
||||
if height != nil {
|
||||
params["height"] = height
|
||||
}
|
||||
_, err := c.caller.Call(ctx, "header", params, result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
|
||||
result := new(ctypes.ResultHeader)
|
||||
params := map[string]interface{}{
|
||||
"hash": hash,
|
||||
}
|
||||
_, err := c.caller.Call(ctx, "header_by_hash", params, result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
|
||||
result := new(ctypes.ResultCommit)
|
||||
params := make(map[string]interface{})
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user