add params proto messages
@@ -882,8 +882,11 @@ func (cfg *MempoolConfig) ValidateBasic() error {
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
	Enable  bool   `mapstructure:"enable"`
	TempDir string `mapstructure:"temp-dir"`
+	// Light blocks needed for state verification can be obtained either via
+	// the P2P layer or RPC layer.
+	UseP2P      bool          `mapstructure:"use-p2p"`
	RPCServers  []string      `mapstructure:"rpc-servers"`
	TrustPeriod time.Duration `mapstructure:"trust-period"`
	TrustHeight int64         `mapstructure:"trust-height"`
@@ -920,17 +923,21 @@ func TestStateSyncConfig() *StateSyncConfig {
// ValidateBasic performs basic validation.
func (cfg *StateSyncConfig) ValidateBasic() error {
	if cfg.Enable {
-		if len(cfg.RPCServers) == 0 {
-			return errors.New("rpc-servers is required")
-		}
+		// If we're not using the P2P stack then we need to validate the
+		// RPCServers
+		if !cfg.UseP2P {
+			if len(cfg.RPCServers) == 0 {
+				return errors.New("rpc-servers is required")
+			}

-		if len(cfg.RPCServers) < 2 {
-			return errors.New("at least two rpc-servers entries is required")
-		}
+			if len(cfg.RPCServers) < 2 {
+				return errors.New("at least two rpc-servers entries is required")
+			}

-		for _, server := range cfg.RPCServers {
-			if len(server) == 0 {
-				return errors.New("found empty rpc-servers entry")
+			for _, server := range cfg.RPCServers {
+				if len(server) == 0 {
+					return errors.New("found empty rpc-servers entry")
+				}
			}
		}
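To make the tightened rule concrete, here is a minimal, hypothetical usage sketch. It assumes the exported config package API at this commit (e.g. DefaultStateSyncConfig); the field and error behaviour come from the hunk above, and the server URLs are placeholders.

	package main

	import (
		"log"

		"github.com/tendermint/tendermint/config"
	)

	func main() {
		// Hypothetical sketch: an RPC-backed state sync config, so the
		// rpc-servers checks above apply. With UseP2P set to true they are skipped.
		cfg := config.DefaultStateSyncConfig()
		cfg.Enable = true
		cfg.UseP2P = false
		cfg.RPCServers = []string{"http://seed-1:26657", "http://seed-2:26657"}

		// ValidateBasic reports the first violated rule (e.g. fewer than two
		// rpc-servers entries, or missing trust settings checked later in the function).
		if err := cfg.ValidateBasic(); err != nil {
			log.Fatalf("invalid state sync config: %v", err)
		}
	}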
@@ -426,15 +426,17 @@ ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
# starting from the height of the snapshot.
enable = {{ .StateSync.Enable }}

# RPC servers (comma-separated) for light client verification of the synced state machine and
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
# header hash obtained from a trusted source, and a period during which validators can be trusted.
#
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
+# State sync can source light blocks needed to verify state either through the P2P layer or via RPC
+# When using the RPC, at least two (comma-separated) server addresses must be specified
+use-p2p = {{ .StateSync.UseP2P }}
rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"

# The hash and height of a trusted block. Must be within the trust-period.
trust-height = {{ .StateSync.TrustHeight }}
trust-hash = "{{ .StateSync.TrustHash }}"

# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
trust-period = "{{ .StateSync.TrustPeriod }}"

# Time to spend discovering snapshots before initiating a restore.
@@ -3,7 +3,6 @@ package statesync

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
@@ -26,12 +25,19 @@ var (
// blocks. NOTE: It is not the responsibility of the dispatcher to verify the
// light blocks.
type dispatcher struct {
+	// a pool of peers to send light block request too
	availablePeers *peerlist
	requestCh      chan<- p2p.Envelope
-	timeout        time.Duration
+	// timeout for light block delivery (immutable)
+	timeout time.Duration

-	mtx   sync.Mutex
-	calls map[types.NodeID]chan *types.LightBlock
+	mtx sync.Mutex
+	// the set of providers that the dispatcher is providing for (is distinct
+	// from available peers)
+	providers map[types.NodeID]struct{}
+	// all pending calls that have been dispatched and are awaiting an answer
+	calls map[types.NodeID]chan *types.LightBlock
+	// signals whether the underlying reactor is still running
	running bool
}
@@ -40,8 +46,8 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
		availablePeers: newPeerList(),
		timeout:        timeout,
		requestCh:      requestCh,
		providers:      make(map[types.NodeID]struct{}),
		calls:          make(map[types.NodeID]chan *types.LightBlock),
		running:        true,
	}
}
@@ -49,6 +55,10 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
// in a list, tracks the call and waits for the reactor to pass along the response
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) {
	d.mtx.Lock()
+	// check that the dispatcher is connected to the reactor
+	if !d.running {
+		return nil, "", errDisconnected
+	}
	// check to see that the dispatcher is connected to at least one peer
	if d.availablePeers.Len() == 0 && len(d.calls) == 0 {
		d.mtx.Unlock()
@@ -59,27 +69,36 @@ func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.Light
	// fetch the next peer id in the list and request a light block from that
	// peer
	peer := d.availablePeers.Pop(ctx)
+	defer d.release(peer)
	lb, err := d.lightBlock(ctx, height, peer)
	return lb, peer, err
}

// Providers turns the dispatcher into a set of providers (per peer) which can
// be used by a light client
-func (d *dispatcher) Providers(chainID string, timeout time.Duration) []provider.Provider {
+func (d *dispatcher) Providers(chainID string) []provider.Provider {
+	providers := make([]provider.Provider, d.availablePeers.Len())
+	for i := 0; i < cap(providers); i++ {
+		peer := d.availablePeers.Pop(context.Background())
+		providers[i] = d.CreateProvider(peer, chainID)
+	}
+	return providers
+}
+
+// Creates an individual provider from a peer id that the dispatcher is
+// connected with.
+func (d *dispatcher) CreateProvider(peer types.NodeID, chainID string) provider.Provider {
	d.mtx.Lock()
	defer d.mtx.Unlock()

-	providers := make([]provider.Provider, d.availablePeers.Len())
-	peers := d.availablePeers.Peers()
-	for index, peer := range peers {
-		providers[index] = &blockProvider{
-			peer:       peer,
-			dispatcher: d,
-			chainID:    chainID,
-			timeout:    timeout,
-		}
-	}
-	return providers
+	d.availablePeers.Remove(peer)
+	d.providers[peer] = struct{}{}
+	return &blockProvider{
+		peer:       peer,
+		dispatcher: d,
+		chainID:    chainID,
+		timeout:    d.timeout,
+	}
}

func (d *dispatcher) stop() {
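As a rough, hypothetical sketch of how the reworked API is meant to be consumed from inside the statesync package (the tests later in this diff wire it up the same way; the channel and peer IDs here are placeholders):

	// sketch only: uses the package-internal names shown above
	func exampleProviders(requestCh chan p2p.Envelope) []provider.Provider {
		d := newDispatcher(requestCh, 10*time.Second)

		// the reactor feeds peers in as it learns about them
		d.addPeer(types.NodeID("aa"))
		d.addPeer(types.NodeID("bb"))

		// each available peer is popped and turned into a light client provider;
		// CreateProvider records it in the providers set and hands back a
		// blockProvider bound to the dispatcher's own timeout
		return d.Providers("test-chain")
	}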
@@ -111,11 +130,9 @@ func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer types.No
		return resp, nil

	case <-ctx.Done():
-		d.release(peer)
-		return nil, nil
+		return nil, ctx.Err()

	case <-time.After(d.timeout):
-		d.release(peer)
		return nil, errNoResponse
	}
}
@@ -132,10 +149,6 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error {
		// this can also happen if the response came in after the timeout
		return errUnsolicitedResponse
	}
-	// release the peer after returning the response
-	defer d.availablePeers.Append(peer)
-	defer close(answerCh)
-	defer delete(d.calls, peer)

	if lb == nil {
		answerCh <- nil
@@ -144,7 +157,7 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error {

	block, err := types.LightBlockFromProto(lb)
	if err != nil {
		fmt.Println("error with converting light block")
		answerCh <- nil
		return err
	}
@@ -152,20 +165,36 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error {
	return nil
}

// addPeer adds a peer to the dispatcher
func (d *dispatcher) addPeer(peer types.NodeID) {
	d.availablePeers.Append(peer)
}

// removePeer removes a peer from the dispatcher
func (d *dispatcher) removePeer(peer types.NodeID) {
	d.mtx.Lock()
	defer d.mtx.Unlock()
-	if _, ok := d.calls[peer]; ok {
+	if call, ok := d.calls[peer]; ok {
+		call <- nil
+		close(call)
		delete(d.calls, peer)
	} else {
		d.availablePeers.Remove(peer)
	}
}

// peerCount returns the amount of peers that the dispatcher is connected with
func (d *dispatcher) peerCount() int {
	return d.availablePeers.Len()
}

func (d *dispatcher) isConnected(peer types.NodeID) bool {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	_, ok := d.providers[peer]
	return ok
}

// dispatch takes a peer and allocates it a channel so long as it's not already
// busy and the receiving channel is still running. It then dispatches the message
func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) {
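The request/response contract between LightBlock, dispatch and respond can be illustrated with a small, hypothetical sketch that imitates the reactor side the same way the handleRequests helper does in the tests below:

	// sketch only: one goroutine requests a block, the other answers it
	func exampleRoundTrip(ctx context.Context) {
		requestCh := make(chan p2p.Envelope, 1)
		d := newDispatcher(requestCh, 5*time.Second)
		d.addPeer(types.NodeID("aa"))

		go func() {
			// pops a peer, registers a pending call in d.calls, sends a
			// LightBlockRequest envelope, then blocks until respond (or timeout)
			lb, peer, err := d.LightBlock(ctx, 100)
			_, _, _ = lb, peer, err
		}()

		// the reactor reads the envelope and answers for the same peer; a nil
		// block resolves the pending call as "no block from this peer"
		envelope := <-requestCh
		_ = d.respond(nil, envelope.To)
	}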
@@ -223,15 +252,19 @@ type blockProvider struct {
}

func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
-	// FIXME: The provider doesn't know if the dispatcher is still connected to
-	// that peer. If the connection is dropped for whatever reason the
-	// dispatcher needs to be able to relay this back to the provider so it can
-	// return ErrConnectionClosed instead of ErrNoResponse
+	// check if the underlying reactor is still connected with the peer
+	if !p.dispatcher.isConnected(p.peer) {
+		return nil, provider.ErrConnectionClosed
+	}
+
	ctx, cancel := context.WithTimeout(ctx, p.timeout)
	defer cancel()
-	lb, _ := p.dispatcher.lightBlock(ctx, height, p.peer)
+	lb, err := p.dispatcher.lightBlock(ctx, height, p.peer)
+	if err != nil {
+		return nil, provider.ErrUnreliableProvider{Reason: err.Error()}
+	}
	if lb == nil {
-		return nil, provider.ErrNoResponse
+		return nil, provider.ErrLightBlockNotFound
	}

	if err := lb.ValidateBasic(p.chainID); err != nil {
@@ -49,6 +49,9 @@ func TestDispatcherBasic(t *testing.T) {
		}(int64(i))
	}
	wg.Wait()
+
+	// we should finish with as many peers as we started out with
+	assert.Equal(t, 5, d.peerCount())
}

func TestDispatcherReturnsNoBlock(t *testing.T) {
@@ -99,7 +102,7 @@ func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) {
	lb, peerResult, err := d.LightBlock(wrapped, 1)
	require.Nil(t, lb)
	require.Equal(t, peerFromSet, peerResult)
-	require.Nil(t, err)
+	require.Equal(t, context.Canceled, err)

	// calls to dispatcher.Lightblock write into the dispatcher's requestCh.
	// we read from the requestCh here to unblock the requestCh for future
@@ -134,7 +137,7 @@ func TestDispatcherProviders(t *testing.T) {
	closeCh := make(chan struct{})
	defer close(closeCh)

-	d := newDispatcher(ch, 1*time.Second)
+	d := newDispatcher(ch, 5*time.Second)

	go handleRequests(t, d, ch, closeCh)
@@ -143,16 +146,17 @@ func TestDispatcherProviders(t *testing.T) {
		d.addPeer(peer)
	}

-	providers := d.Providers(chainID, 5*time.Second)
+	providers := d.Providers(chainID)
	require.Len(t, providers, 5)
	for i, p := range providers {
		bp, ok := p.(*blockProvider)
		require.True(t, ok)
-		assert.Equal(t, bp.String(), string(peers[i]))
+		assert.Equal(t, string(peers[i]), bp.String(), i)
		lb, err := p.LightBlock(context.Background(), 10)
		assert.Error(t, err)
		assert.Nil(t, lb)
	}
	require.Equal(t, 0, d.peerCount())
}

func TestPeerListBasic(t *testing.T) {
@@ -178,13 +182,22 @@ func TestPeerListBasic(t *testing.T) {
	}
	assert.Equal(t, half, peerList.Len())

+	// removing a peer that doesn't exist should not change the list
+	peerList.Remove(types.NodeID("lp"))
+	assert.Equal(t, half, peerList.Len())
+
+	// removing a peer that exists should decrease the list size by one
+	peerList.Remove(peerSet[half])
+	half++
	assert.Equal(t, peerSet[half], peerList.Pop(ctx))
	assert.Equal(t, numPeers-half-1, peerList.Len())

+	// popping the next peer should work as expected
	assert.Equal(t, peerSet[half+1], peerList.Pop(ctx))
	assert.Equal(t, numPeers-half-2, peerList.Len())

+	// append the two peers back
	peerList.Append(peerSet[half])
	peerList.Append(peerSet[half+1])
	assert.Equal(t, half, peerList.Len())
}

func TestPeerListBlocksWhenEmpty(t *testing.T) {
@@ -277,6 +290,25 @@ func TestPeerListConcurrent(t *testing.T) {
	}
}

+func TestPeerListRemove(t *testing.T) {
+	peerList := newPeerList()
+	numPeers := 10
+
+	peerSet := createPeerSet(numPeers)
+	for _, peer := range peerSet {
+		peerList.Append(peer)
+	}
+
+	for _, peer := range peerSet {
+		peerList.Remove(peer)
+		for _, p := range peerList.Peers() {
+			require.NotEqual(t, p, peer)
+		}
+		numPeers--
+		require.Equal(t, numPeers, peerList.Len())
+	}
+}
+
// handleRequests is a helper function usually run in a separate go routine to
// imitate the expected responses of the reactor wired to the dispatcher
func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) {
@@ -1,50 +0,0 @@
-package statesync
-
-import (
-	"context"
-	"time"
-
-	mock "github.com/stretchr/testify/mock"
-
-	state "github.com/tendermint/tendermint/state"
-)
-
-// MockSyncReactor is an autogenerated mock type for the SyncReactor type.
-// Because of the stateprovider uses in Sync(), we use package statesync instead of mocks.
-type MockSyncReactor struct {
-	mock.Mock
-}
-
-// Backfill provides a mock function with given fields: _a0
-func (_m *MockSyncReactor) Backfill(_a0 state.State) error {
-	ret := _m.Called(_a0)
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func(state.State) error); ok {
-		r0 = rf(_a0)
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// Sync provides a mock function with given fields: _a0, _a1, _a2
-func (_m *MockSyncReactor) Sync(_a0 context.Context, _a1 StateProvider, _a2 time.Duration) (state.State, error) {
-	ret := _m.Called(_a0, _a1, _a2)
-
-	var r0 state.State
-	if rf, ok := ret.Get(0).(func(context.Context, StateProvider, time.Duration) state.State); ok {
-		r0 = rf(_a0, _a1, _a2)
-	} else {
-		r0 = ret.Get(0).(state.State)
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, StateProvider, time.Duration) error); ok {
-		r1 = rf(_a0, _a1, _a2)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
@@ -16,6 +16,7 @@ import (
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/light"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
@@ -81,6 +82,9 @@ const (
	// LightBlockChannel exchanges light blocks
	LightBlockChannel = p2p.ChannelID(0x62)

+	// ParamsChannel exchanges consensus params
+	ParamsChannel = p2p.ChannelID(0x63)
+
	// recentSnapshots is the number of recent snapshots to send and receive per peer.
	recentSnapshots = 10
@@ -102,12 +106,6 @@ const (
	maxLightBlockRequestRetries = 20
)

-// SyncReactor defines an interface used for testing abilities of node.startStateSync.
-type SyncReactor interface {
-	Sync(context.Context, StateProvider, time.Duration) (sm.State, error)
-	Backfill(sm.State) error
-}
-
// Reactor handles state sync, both restoring snapshots for the local node and
// serving snapshots for other nodes.
type Reactor struct {
@@ -214,13 +212,14 @@ func (r *Reactor) OnStop() {
}

// Sync runs a state sync, fetching snapshots and providing chunks to the
-// application. It also saves tendermint state and runs a backfill process to
-// retrieve the necessary amount of headers, commits and validators sets to be
-// able to process evidence and participate in consensus.
+// application. At the close of the operation, Sync will bootstrap the state
+// store and persist the commit at that height so that either consensus or
+// blocksync can commence. It will then proceed to backfill the necessary amount
+// of historical blocks before participating in consensus
func (r *Reactor) Sync(
	ctx context.Context,
-	stateProvider StateProvider,
-	discoveryTime time.Duration,
+	chainID string,
+	initialHeight int64,
) (sm.State, error) {
	r.mtx.Lock()
	if r.syncer != nil {
@@ -228,9 +227,31 @@ func (r *Reactor) Sync(
		return sm.State{}, errors.New("a state sync is already in progress")
	}

-	if stateProvider == nil {
-		r.mtx.Unlock()
-		return sm.State{}, errors.New("the stateProvider should not be nil when doing the state sync")
+	to := light.TrustOptions{
+		Period: r.cfg.TrustPeriod,
+		Height: r.cfg.TrustHeight,
+		Hash:   r.cfg.TrustHashBytes(),
	}
+	spLogger := r.Logger.With("module", "stateprovider")
+
+	var (
+		stateProvider StateProvider
+		err           error
+	)
+	if r.cfg.UseP2P {
+		// state provider needs at least two connected peers to initialize
+		spLogger.Info("Generating P2P state provider")
+		r.waitForEnoughPeers(ctx, 2)
+		stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, r.Dispatcher(), to, spLogger)
+		if err != nil {
+			return sm.State{}, err
+		}
+		spLogger.Info("Finished generating P2P state provider")
+	} else {
+		stateProvider, err = NewRPCStateProvider(ctx, chainID, initialHeight, r.cfg.RPCServers, to, spLogger)
+		if err != nil {
+			return sm.State{}, err
+		}
+	}

	r.syncer = newSyncer(
@@ -253,7 +274,7 @@ func (r *Reactor) Sync(
		}
	}

-	state, commit, err := r.syncer.SyncAny(ctx, discoveryTime, requestSnapshotsHook)
+	state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook)
	if err != nil {
		return sm.State{}, err
	}
@@ -272,6 +293,11 @@ func (r *Reactor) Sync(
		return sm.State{}, fmt.Errorf("failed to store last seen commit: %w", err)
	}

+	err = r.Backfill(ctx, state)
+	if err != nil {
+		return sm.State{}, err
+	}
+
	return state, nil
}
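With the state provider construction folded into Sync, a caller now passes only the chain ID and initial height. A hedged sketch of the new call site follows; it mirrors the node.go change later in this diff, with genesisState, stateSyncReactor and logger standing in for whatever names the caller actually has:

	// sketch only
	go func() {
		syncedState, err := stateSyncReactor.Sync(ctx, genesisState.ChainID, genesisState.InitialHeight)
		if err != nil {
			logger.Error("state sync failed", "err", err)
			return
		}
		// Sync has already bootstrapped the state store, persisted the commit at
		// that height and run Backfill, so block sync or consensus can take over.
		_ = syncedState
	}()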
@@ -279,7 +305,7 @@ func (r *Reactor) Sync(
// order. It does not stop verifying blocks until reaching a block with a height
// and time that is less or equal to the stopHeight and stopTime. The
// trustedBlockID should be of the header at startHeight.
-func (r *Reactor) Backfill(state sm.State) error {
+func (r *Reactor) Backfill(ctx context.Context, state sm.State) error {
	params := state.ConsensusParams.Evidence
	stopHeight := state.LastBlockHeight - params.MaxAgeNumBlocks
	stopTime := state.LastBlockTime.Add(-params.MaxAgeDuration)

@@ -290,7 +316,7 @@ func (r *Reactor) Backfill(state sm.State) error {
		stopTime = state.LastBlockTime
	}
	return r.backfill(
-		context.Background(),
+		ctx,
		state.ChainID,
		state.LastBlockHeight,
		stopHeight,
@@ -732,7 +758,7 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) {
// processPeerUpdate processes a PeerUpdate, returning an error upon failing to
// handle the PeerUpdate or if a panic is recovered.
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
-	r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
+	r.Logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)

	r.mtx.RLock()
	defer r.mtx.RUnlock()

@@ -750,6 +776,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
		}
		r.dispatcher.removePeer(peerUpdate.NodeID)
	}
+	r.Logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
}

// processPeerUpdates initiates a blocking process where we listen for and handle
@@ -839,5 +866,17 @@ func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) {
		},
		ValidatorSet: vals,
	}, nil

}

+func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(200 * time.Millisecond):
+			if r.dispatcher.peerCount() >= numPeers {
+				return
+			}
+		}
+	}
+}
@@ -18,6 +18,7 @@ import (
	"github.com/tendermint/tendermint/internal/statesync/mocks"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/light"
	"github.com/tendermint/tendermint/light/provider"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -388,8 +389,9 @@ func TestReactor_Dispatcher(t *testing.T) {
	go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0)

	dispatcher := rts.reactor.Dispatcher()
-	providers := dispatcher.Providers(factory.DefaultTestChainID, 5*time.Second)
+	providers := dispatcher.Providers(factory.DefaultTestChainID)
	require.Len(t, providers, 2)
+	require.Equal(t, 2, dispatcher.peerCount())

	wg := sync.WaitGroup{}
@@ -416,6 +418,41 @@ func TestReactor_Dispatcher(t *testing.T) {
		t.Fail()
	case <-ctx.Done():
	}

+	t.Log(dispatcher.availablePeers.Peers())
+	require.Equal(t, 2, dispatcher.peerCount())
+
+	rts.peerUpdateCh <- p2p.PeerUpdate{
+		NodeID: types.NodeID("cc"),
+		Status: p2p.PeerStatusUp,
+	}
+
+	require.Equal(t, 3, dispatcher.peerCount())
+
+	// we now test the p2p state provider
+	lb, _, err := dispatcher.LightBlock(ctx, 2)
+	require.NoError(t, err)
+	to := light.TrustOptions{
+		Period: 24 * time.Hour,
+		Height: lb.Height,
+		Hash:   lb.Hash(),
+	}
+
+	p2pStateProvider, err := NewP2PStateProvider(ctx, "testchain", 1, rts.reactor.Dispatcher(), to, log.TestingLogger())
+	require.NoError(t, err)
+
+	appHash, err := p2pStateProvider.AppHash(ctx, 5)
+	require.NoError(t, err)
+	require.Len(t, appHash, 20)
+
+	state, err := p2pStateProvider.State(ctx, 6)
+	require.NoError(t, err)
+	require.Equal(t, appHash, state.AppHash)
+
+	commit, err := p2pStateProvider.Commit(ctx, 5)
+	require.NoError(t, err)
+	require.Equal(t, commit.BlockID, state.LastBlockID)
+
}

func TestReactor_Backfill(t *testing.T) {
@@ -18,6 +18,7 @@ import (
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
+	"github.com/tendermint/tendermint/version"
)

//go:generate ../../scripts/mockery_generate.sh StateProvider
@@ -33,20 +34,21 @@ type StateProvider interface {
	State(ctx context.Context, height uint64) (sm.State, error)
}

-// lightClientStateProvider is a state provider using the light client.
-type lightClientStateProvider struct {
+// stateProvider implements the above interface using a light client to fetch and
+// verify the AppHash, Commit, and Tendermint State from trusted data in order
+// to bootstrap a node. The stateProvider can either be initated using RPC or
+// P2P Providers.
+type stateProvider struct {
	tmsync.Mutex  // light.Client is not concurrency-safe
	lc            *light.Client
-	version       sm.Version
	initialHeight int64
	providers     map[lightprovider.Provider]string
}

-// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients.
-func NewLightClientStateProvider(
+// NewRPCStateProvider creates a new StateProvider using a light client and RPC clients.
+func NewRPCStateProvider(
	ctx context.Context,
	chainID string,
-	version sm.Version,
	initialHeight int64,
	servers []string,
	trustOptions light.TrustOptions,
@@ -75,26 +77,24 @@ func NewLightClientStateProvider(
	if err != nil {
		return nil, err
	}
-	return &lightClientStateProvider{
+	return &stateProvider{
		lc:            lc,
-		version:       version,
		initialHeight: initialHeight,
		providers:     providerRemotes,
	}, nil
}

-// NewLightClientStateProviderFromDispatcher creates a light client state
-// provider but uses a p2p connected dispatched instead of RPC endpoints
-func NewLightClientStateProviderFromDispatcher(
+// NewP2PStateProvider creates a light client state
+// provider but uses a dispatcher connected to the P2P layer
+func NewP2PStateProvider(
	ctx context.Context,
	chainID string,
-	version sm.Version,
	initialHeight int64,
	dispatcher *dispatcher,
	trustOptions light.TrustOptions,
	logger log.Logger,
) (StateProvider, error) {
-	providers := dispatcher.Providers(chainID, 30*time.Second)
+	providers := dispatcher.Providers(chainID)
	if len(providers) < 2 {
		return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers))
	}
@@ -110,16 +110,15 @@ func NewLightClientStateProviderFromDispatcher(
		return nil, err
	}

-	return &lightClientStateProvider{
+	return &stateProvider{
		lc:            lc,
-		version:       version,
		initialHeight: initialHeight,
		providers:     providersMap,
	}, nil
}

// AppHash implements StateProvider.
-func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) {
+func (s *stateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) {
	s.Lock()
	defer s.Unlock()
@@ -128,7 +127,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) (
	if err != nil {
		return nil, err
	}
-	// We also try to fetch the blocks at height H and H+2, since we need these
+	// We also try to fetch the blocks at H+2, since we need these
	// when building the state while restoring the snapshot. This avoids the race
	// condition where we try to restore a snapshot before H+2 exists.
	//
@@ -140,15 +139,11 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) (
	if err != nil {
		return nil, err
	}
-	_, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now())
-	if err != nil {
-		return nil, err
-	}
	return header.AppHash, nil
}

// Commit implements StateProvider.
-func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) {
+func (s *stateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) {
	s.Lock()
	defer s.Unlock()
	header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now())
@@ -159,13 +154,12 @@ func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*
}

// State implements StateProvider.
-func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm.State, error) {
+func (s *stateProvider) State(ctx context.Context, height uint64) (sm.State, error) {
	s.Lock()
	defer s.Unlock()

	state := sm.State{
		ChainID:       s.lc.ChainID(),
-		Version:       s.version,
		InitialHeight: s.initialHeight,
	}
	if state.InitialHeight == 0 {
@@ -193,6 +187,10 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm
		return sm.State{}, err
	}

+	state.Version = sm.Version{
+		Consensus: currentLightBlock.Version,
+		Software:  version.TMVersion,
+	}
	state.LastBlockHeight = lastLightBlock.Height
	state.LastBlockTime = lastLightBlock.Time
	state.LastBlockID = lastLightBlock.Commit.BlockID
node/node.go (137 lines changed)
@@ -29,7 +29,6 @@ import (
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/libs/strings"
	tmtime "github.com/tendermint/tendermint/libs/time"
-	"github.com/tendermint/tendermint/light"
	"github.com/tendermint/tendermint/privval"
	tmgrpc "github.com/tendermint/tendermint/privval/grpc"
	"github.com/tendermint/tendermint/proxy"
@@ -652,6 +651,8 @@ func (n *nodeImpl) OnStart() error {
	}

	// Run state sync
+	// TODO: We shouldn't run state sync if we already have state that has a
+	// LastBlockHeight that is not InitialHeight
	if n.stateSync {
		bcR, ok := n.bcReactor.(cs.BlockSyncReactor)
		if !ok {
@@ -664,17 +665,53 @@ func (n *nodeImpl) OnStart() error {
			return fmt.Errorf("unable to derive state: %w", err)
		}

-		ssc := n.config.StateSync
-		sp, err := constructStateProvider(ssc, state, n.Logger.With("module", "light"))
+		n.Logger.Info("starting state sync...")

-		if err != nil {
-			return fmt.Errorf("failed to set up light client state provider: %w", err)
+		// TODO: we may want to move these events within the respective
+		// reactors.
+		// at the beginning of the statesync start, we use the initialHeight as the event height
+		// because of the statesync doesn't have the concreate state height before fetched the snapshot.
+		d := types.EventDataStateSyncStatus{Complete: false, Height: state.InitialHeight}
+		if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil {
+			n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err)
		}

-		if err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, sp,
-			ssc, n.config.FastSyncMode, state.InitialHeight, n.eventBus); err != nil {
-			return fmt.Errorf("failed to start state sync: %w", err)
-		}
+		// FIXME: We shouldn't allow state sync to silently error out without
+		// bubbling up the error and gracefully shutting down the rest of the node
+		go func() {
+			state, err := n.stateSyncReactor.Sync(context.TODO(), state.ChainID, state.InitialHeight)
+			if err != nil {
+				n.Logger.Error("state sync failed", "err", err)
+				return
+			}
+
+			n.consensusReactor.SetStateSyncingMetrics(0)
+
+			d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight}
+			if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil {
+				n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err)
+			}
+
+			// TODO: Some form of orchestrator is needed here between the state
+			// advancing reactors to be able to control which one of the three
+			// is running
+			if n.config.FastSyncMode {
+				// FIXME Very ugly to have these metrics bleed through here.
+				n.consensusReactor.SetBlockSyncingMetrics(1)
+				if err := bcR.SwitchToBlockSync(state); err != nil {
+					n.Logger.Error("failed to switch to block sync", "err", err)
+					return
+				}
+
+				d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight}
+				if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil {
+					n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err)
+				}
+
+			} else {
+				n.consensusReactor.SwitchToConsensus(state, true)
+			}
+		}()
	}

	return nil
@@ -1033,67 +1070,6 @@ func (n *nodeImpl) NodeInfo() types.NodeInfo {
	return n.nodeInfo
}

-// startStateSync starts an asynchronous state sync process, then switches to block sync mode.
-func startStateSync(
-	ssR statesync.SyncReactor,
-	bcR cs.BlockSyncReactor,
-	conR cs.ConsSyncReactor,
-	sp statesync.StateProvider,
-	config *cfg.StateSyncConfig,
-	blockSync bool,
-	stateInitHeight int64,
-	eb *types.EventBus,
-) error {
-	stateSyncLogger := eb.Logger.With("module", "statesync")
-
-	stateSyncLogger.Info("starting state sync...")
-
-	// at the beginning of the statesync start, we use the initialHeight as the event height
-	// because of the statesync doesn't have the concreate state height before fetched the snapshot.
-	d := types.EventDataStateSyncStatus{Complete: false, Height: stateInitHeight}
-	if err := eb.PublishEventStateSyncStatus(d); err != nil {
-		stateSyncLogger.Error("failed to emit the statesync start event", "err", err)
-	}
-
-	go func() {
-		state, err := ssR.Sync(context.TODO(), sp, config.DiscoveryTime)
-		if err != nil {
-			stateSyncLogger.Error("state sync failed", "err", err)
-			return
-		}
-
-		if err := ssR.Backfill(state); err != nil {
-			stateSyncLogger.Error("backfill failed; node has insufficient history to verify all evidence;"+
-				" proceeding optimistically...", "err", err)
-		}
-
-		conR.SetStateSyncingMetrics(0)
-
-		d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight}
-		if err := eb.PublishEventStateSyncStatus(d); err != nil {
-			stateSyncLogger.Error("failed to emit the statesync start event", "err", err)
-		}
-
-		if blockSync {
-			// FIXME Very ugly to have these metrics bleed through here.
-			conR.SetBlockSyncingMetrics(1)
-			if err := bcR.SwitchToBlockSync(state); err != nil {
-				stateSyncLogger.Error("failed to switch to block sync", "err", err)
-				return
-			}
-
-			d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight}
-			if err := eb.PublishEventBlockSyncStatus(d); err != nil {
-				stateSyncLogger.Error("failed to emit the block sync starting event", "err", err)
-			}
-
-		} else {
-			conR.SwitchToConsensus(state, true)
-		}
-	}()
-	return nil
-}
-
// genesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.

@@ -1276,24 +1252,3 @@ func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Ch

	return channels
}
-
-func constructStateProvider(
-	ssc *cfg.StateSyncConfig,
-	state sm.State,
-	logger log.Logger,
-) (statesync.StateProvider, error) {
-	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
-	defer cancel()
-
-	to := light.TrustOptions{
-		Period: ssc.TrustPeriod,
-		Height: ssc.TrustHeight,
-		Hash:   ssc.TrustHashBytes(),
-	}
-
-	return statesync.NewLightClientStateProvider(
-		ctx,
-		state.ChainID, state.Version, state.InitialHeight,
-		ssc.RPCServers, to, logger,
-	)
-}
@@ -21,16 +21,12 @@ import (
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/tmhash"
-	consmocks "github.com/tendermint/tendermint/internal/consensus/mocks"
-	ssmocks "github.com/tendermint/tendermint/internal/statesync/mocks"
-
	"github.com/tendermint/tendermint/internal/evidence"
	"github.com/tendermint/tendermint/internal/mempool"
	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
-	statesync "github.com/tendermint/tendermint/internal/statesync"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	tmtime "github.com/tendermint/tendermint/libs/time"
	"github.com/tendermint/tendermint/privval"
@@ -656,65 +652,3 @@ func loadStatefromGenesis(t *testing.T) sm.State {

	return state
}
-
-func TestNodeStartStateSync(t *testing.T) {
-	mockSSR := &statesync.MockSyncReactor{}
-	mockFSR := &consmocks.BlockSyncReactor{}
-	mockCSR := &consmocks.ConsSyncReactor{}
-	mockSP := &ssmocks.StateProvider{}
-	state := loadStatefromGenesis(t)
-	config := cfg.ResetTestRoot("load_state_from_genesis")
-
-	eventBus, err := createAndStartEventBus(log.TestingLogger())
-	defer func() {
-		err := eventBus.Stop()
-		require.NoError(t, err)
-	}()
-
-	require.NoError(t, err)
-	require.NotNil(t, eventBus)
-
-	sub, err := eventBus.Subscribe(context.Background(), "test-client", types.EventQueryStateSyncStatus, 10)
-	require.NoError(t, err)
-	require.NotNil(t, sub)
-
-	cfgSS := config.StateSync
-
-	mockSSR.On("Sync", context.TODO(), mockSP, cfgSS.DiscoveryTime).Return(state, nil).
-		On("Backfill", state).Return(nil)
-	mockCSR.On("SetStateSyncingMetrics", float64(0)).Return().
-		On("SwitchToConsensus", state, true).Return()
-
-	require.NoError(t,
-		startStateSync(mockSSR, mockFSR, mockCSR, mockSP, config.StateSync, false, state.InitialHeight, eventBus))
-
-	for cnt := 0; cnt < 2; {
-		select {
-		case <-time.After(3 * time.Second):
-			t.Errorf("StateSyncStatus timeout")
-		case msg := <-sub.Out():
-			if cnt == 0 {
-				ensureStateSyncStatus(t, msg, false, state.InitialHeight)
-				cnt++
-			} else {
-				// the state height = 0 because we are not actually update the state in this test
-				ensureStateSyncStatus(t, msg, true, 0)
-				cnt++
-			}
-		}
-	}
-
-	mockSSR.AssertNumberOfCalls(t, "Sync", 1)
-	mockSSR.AssertNumberOfCalls(t, "Backfill", 1)
-	mockCSR.AssertNumberOfCalls(t, "SetStateSyncingMetrics", 1)
-	mockCSR.AssertNumberOfCalls(t, "SwitchToConsensus", 1)
-}
-
-func ensureStateSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
-	t.Helper()
-	status, ok := msg.Data().(types.EventDataStateSyncStatus)
-
-	require.True(t, ok)
-	require.Equal(t, complete, status.Complete)
-	require.Equal(t, height, status.Height)
-}
@@ -5,6 +5,8 @@ import (
	fmt "fmt"

	proto "github.com/gogo/protobuf/proto"
+
+	"github.com/tendermint/tendermint/types"
)

// Wrap implements the p2p Wrapper interface and wraps a state sync proto message.

@@ -28,6 +30,12 @@ func (m *Message) Wrap(pb proto.Message) error {
	case *LightBlockResponse:
		m.Sum = &Message_LightBlockResponse{LightBlockResponse: msg}

+	case *ParamsRequest:
+		m.Sum = &Message_ParamsRequest{ParamsRequest: msg}
+
+	case *ParamsResponse:
+		m.Sum = &Message_ParamsResponse{ParamsResponse: msg}
+
	default:
		return fmt.Errorf("unknown message: %T", msg)
	}

@@ -57,6 +65,12 @@ func (m *Message) Unwrap() (proto.Message, error) {
	case *Message_LightBlockResponse:
		return m.GetLightBlockResponse(), nil

+	case *Message_ParamsRequest:
+		return m.GetParamsRequest(), nil
+
+	case *Message_ParamsResponse:
+		return m.GetParamsResponse(), nil
+
	default:
		return nil, fmt.Errorf("unknown message: %T", msg)
	}

@@ -106,6 +120,22 @@ func (m *Message) Validate() error {
	// light block validation handled by the backfill process
	case *Message_LightBlockResponse:

+	case *Message_ParamsRequest:
+		if m.GetParamsRequest().Height == 0 {
+			return errors.New("height cannot be 0")
+		}
+
+	case *Message_ParamsResponse:
+		resp := m.GetParamsResponse()
+		if resp.Height == 0 {
+			return errors.New("height cannot be 0")
+		}
+
+		if resp.ConsensusParams != nil {
+			cp := types.ConsensusParamsFromProto(*resp.ConsensusParams)
+			return cp.ValidateConsensusParams()
+		}
+
	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
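A small, hypothetical round trip through the new oneof cases, using only the Wrap/Unwrap/Validate helpers shown above (ssproto is the conventional import alias for this package, as in the tests below):

	msg := new(ssproto.Message)
	if err := msg.Wrap(&ssproto.ParamsRequest{Height: 9001}); err != nil {
		panic(err)
	}
	if err := msg.Validate(); err != nil { // would reject Height == 0
		panic(err)
	}
	inner, err := msg.Unwrap() // yields the *ssproto.ParamsRequest again
	_, _ = inner, err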
@@ -161,6 +161,35 @@ func TestStateSyncVectors(t *testing.T) {
			},
			"2214080110021803220c697427732061206368756e6b",
		},
+		{
+			"LightBlockRequest",
+			&ssproto.LightBlockRequest{
+				Height: 100,
+			},
+			"2a020864",
+		},
+		{
+			"LightBlockResponse",
+			&ssproto.LightBlockResponse{
+				LightBlock: nil,
+			},
+			"3200",
+		},
+		{
+			"ParamsRequest",
+			&ssproto.ParamsRequest{
+				Height: 9001,
+			},
+			"3a0308a946",
+		},
+		{
+			"ParamsResponse",
+			&ssproto.ParamsResponse{
+				Height:          9001,
+				ConsensusParams: &tmproto.ConsensusParams{},
+			},
+			"420508a9461200",
+		},
	}

	for _, tc := range testCases {
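The two new vectors follow directly from the oneof field numbers: params_request is field 7 (tag byte 0x3a) and params_response is field 8 (tag byte 0x42), which is also why the generated marshal code further down writes exactly those bytes. A hypothetical check of the ParamsRequest vector, assuming gogo/protobuf's Marshal and the standard hex package:

	msg := new(ssproto.Message)
	if err := msg.Wrap(&ssproto.ParamsRequest{Height: 9001}); err != nil {
		panic(err)
	}
	bz, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(bz)) // 3a0308a946, matching the table above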
@@ -31,6 +31,8 @@ type Message struct {
	//	*Message_ChunkResponse
	//	*Message_LightBlockRequest
	//	*Message_LightBlockResponse
+	//	*Message_ParamsRequest
+	//	*Message_ParamsResponse
	Sum isMessage_Sum `protobuf_oneof:"sum"`
}

@@ -91,6 +93,12 @@ type Message_LightBlockRequest struct {
type Message_LightBlockResponse struct {
	LightBlockResponse *LightBlockResponse `protobuf:"bytes,6,opt,name=light_block_response,json=lightBlockResponse,proto3,oneof" json:"light_block_response,omitempty"`
}
+type Message_ParamsRequest struct {
+	ParamsRequest *ParamsRequest `protobuf:"bytes,7,opt,name=params_request,json=paramsRequest,proto3,oneof" json:"params_request,omitempty"`
+}
+type Message_ParamsResponse struct {
+	ParamsResponse *ParamsResponse `protobuf:"bytes,8,opt,name=params_response,json=paramsResponse,proto3,oneof" json:"params_response,omitempty"`
+}

func (*Message_SnapshotsRequest) isMessage_Sum()  {}
func (*Message_SnapshotsResponse) isMessage_Sum() {}

@@ -98,6 +106,8 @@ func (*Message_ChunkRequest) isMessage_Sum() {}
func (*Message_ChunkResponse) isMessage_Sum()      {}
func (*Message_LightBlockRequest) isMessage_Sum()  {}
func (*Message_LightBlockResponse) isMessage_Sum() {}
+func (*Message_ParamsRequest) isMessage_Sum()      {}
+func (*Message_ParamsResponse) isMessage_Sum()     {}

func (m *Message) GetSum() isMessage_Sum {
	if m != nil {
@@ -148,6 +158,20 @@ func (m *Message) GetLightBlockResponse() *LightBlockResponse {
	return nil
}

+func (m *Message) GetParamsRequest() *ParamsRequest {
+	if x, ok := m.GetSum().(*Message_ParamsRequest); ok {
+		return x.ParamsRequest
+	}
+	return nil
+}
+
+func (m *Message) GetParamsResponse() *ParamsResponse {
+	if x, ok := m.GetSum().(*Message_ParamsResponse); ok {
+		return x.ParamsResponse
+	}
+	return nil
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Message) XXX_OneofWrappers() []interface{} {
	return []interface{}{

@@ -157,6 +181,8 @@ func (*Message) XXX_OneofWrappers() []interface{} {
		(*Message_ChunkResponse)(nil),
		(*Message_LightBlockRequest)(nil),
		(*Message_LightBlockResponse)(nil),
+		(*Message_ParamsRequest)(nil),
+		(*Message_ParamsResponse)(nil),
	}
}
@@ -496,6 +522,102 @@ func (m *LightBlockResponse) GetLightBlock() *types.LightBlock {
	return nil
}

type ParamsRequest struct {
	Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
}

func (m *ParamsRequest) Reset()         { *m = ParamsRequest{} }
func (m *ParamsRequest) String() string { return proto.CompactTextString(m) }
func (*ParamsRequest) ProtoMessage()    {}
func (*ParamsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a1c2869546ca7914, []int{7}
}
func (m *ParamsRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ParamsRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ParamsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ParamsRequest.Merge(m, src)
}
func (m *ParamsRequest) XXX_Size() int {
	return m.Size()
}
func (m *ParamsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ParamsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ParamsRequest proto.InternalMessageInfo

func (m *ParamsRequest) GetHeight() uint64 {
	if m != nil {
		return m.Height
	}
	return 0
}
type ParamsResponse struct {
	Height          uint64                  `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
	ConsensusParams *types.ConsensusParams  `protobuf:"bytes,2,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"`
}

func (m *ParamsResponse) Reset()         { *m = ParamsResponse{} }
func (m *ParamsResponse) String() string { return proto.CompactTextString(m) }
func (*ParamsResponse) ProtoMessage()    {}
func (*ParamsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a1c2869546ca7914, []int{8}
}
func (m *ParamsResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ParamsResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ParamsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ParamsResponse.Merge(m, src)
}
func (m *ParamsResponse) XXX_Size() int {
	return m.Size()
}
func (m *ParamsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ParamsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ParamsResponse proto.InternalMessageInfo

func (m *ParamsResponse) GetHeight() uint64 {
	if m != nil {
		return m.Height
	}
	return 0
}

func (m *ParamsResponse) GetConsensusParams() *types.ConsensusParams {
	if m != nil {
		return m.ConsensusParams
	}
	return nil
}
func init() {
	proto.RegisterType((*Message)(nil), "tendermint.statesync.Message")
	proto.RegisterType((*SnapshotsRequest)(nil), "tendermint.statesync.SnapshotsRequest")

@@ -504,43 +626,50 @@ func init() {
	proto.RegisterType((*ChunkResponse)(nil), "tendermint.statesync.ChunkResponse")
	proto.RegisterType((*LightBlockRequest)(nil), "tendermint.statesync.LightBlockRequest")
	proto.RegisterType((*LightBlockResponse)(nil), "tendermint.statesync.LightBlockResponse")
+	proto.RegisterType((*ParamsRequest)(nil), "tendermint.statesync.ParamsRequest")
+	proto.RegisterType((*ParamsResponse)(nil), "tendermint.statesync.ParamsResponse")
}

func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) }
var fileDescriptor_a1c2869546ca7914 = []byte{
	// gzipped FileDescriptorProto, regenerated by this commit (485 bytes before the
	// change, 573 bytes after); the raw byte listing is omitted here for readability.
}
func (m *Message) Marshal() (dAtA []byte, err error) {
@@ -701,6 +830,48 @@ func (m *Message_LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, err
	}
	return len(dAtA) - i, nil
}
func (m *Message_ParamsRequest) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Message_ParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if m.ParamsRequest != nil {
		{
			size, err := m.ParamsRequest.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintTypes(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x3a
	}
	return len(dAtA) - i, nil
}
func (m *Message_ParamsResponse) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Message_ParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if m.ParamsResponse != nil {
		{
			size, err := m.ParamsResponse.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintTypes(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x42
	}
	return len(dAtA) - i, nil
}
func (m *SnapshotsRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
@@ -932,6 +1103,74 @@ func (m *LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	return len(dAtA) - i, nil
}

func (m *ParamsRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ParamsRequest) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Height != 0 {
		i = encodeVarintTypes(dAtA, i, uint64(m.Height))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}

func (m *ParamsResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ParamsResponse) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.ConsensusParams != nil {
		{
			size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintTypes(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12
	}
	if m.Height != 0 {
		i = encodeVarintTypes(dAtA, i, uint64(m.Height))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}

func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
	offset -= sovTypes(v)
	base := offset
@@ -1027,6 +1266,30 @@ func (m *Message_LightBlockResponse) Size() (n int) {
	}
	return n
}
func (m *Message_ParamsRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.ParamsRequest != nil {
		l = m.ParamsRequest.Size()
		n += 1 + l + sovTypes(uint64(l))
	}
	return n
}
func (m *Message_ParamsResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.ParamsResponse != nil {
		l = m.ParamsResponse.Size()
		n += 1 + l + sovTypes(uint64(l))
	}
	return n
}
func (m *SnapshotsRequest) Size() (n int) {
	if m == nil {
		return 0
@@ -1130,6 +1393,34 @@ func (m *LightBlockResponse) Size() (n int) {
	return n
}

func (m *ParamsRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Height != 0 {
		n += 1 + sovTypes(uint64(m.Height))
	}
	return n
}

func (m *ParamsResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Height != 0 {
		n += 1 + sovTypes(uint64(m.Height))
	}
	if m.ConsensusParams != nil {
		l = m.ConsensusParams.Size()
		n += 1 + l + sovTypes(uint64(l))
	}
	return n
}

func sovTypes(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
@@ -1375,6 +1666,76 @@ func (m *Message) Unmarshal(dAtA []byte) error {
			}
			m.Sum = &Message_LightBlockResponse{v}
			iNdEx = postIndex
		case 7:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ParamsRequest", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			v := &ParamsRequest{}
			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			m.Sum = &Message_ParamsRequest{v}
			iNdEx = postIndex
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ParamsResponse", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			v := &ParamsResponse{}
			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			m.Sum = &Message_ParamsResponse{v}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipTypes(dAtA[iNdEx:])
@@ -2044,6 +2405,180 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error {
	}
	return nil
}
func (m *ParamsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ParamsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
			}
			m.Height = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Height |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipTypes(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *ParamsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ParamsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
			}
			m.Height = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Height |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ConsensusParams == nil {
				m.ConsensusParams = &types.ConsensusParams{}
			}
			if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipTypes(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
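
The single-byte keys written by the generated marshalers above follow the standard protobuf rule: a field key is (field_number << 3) | wire_type. The new oneof members are length-delimited (wire type 2), so params_request (field 7) is keyed 0x3a and params_response (field 8) is keyed 0x42; inside ParamsResponse, height (field 1, varint) is keyed 0x8 and consensus_params (field 2, length-delimited) is keyed 0x12. A minimal sketch of that arithmetic, not part of the generated file:

// key returns the protobuf field key for a single-byte field number and wire type.
// For example, key(7, 2) == 0x3a and key(8, 2) == 0x42.
func key(fieldNumber, wireType byte) byte {
	return fieldNumber<<3 | wireType
}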
@@ -2,6 +2,7 @@ syntax = "proto3";
package tendermint.statesync;

import "tendermint/types/types.proto";
import "tendermint/types/params.proto";

option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync";

@@ -13,6 +14,8 @@ message Message {
    ChunkResponse chunk_response = 4;
    LightBlockRequest light_block_request = 5;
    LightBlockResponse light_block_response = 6;
    ParamsRequest params_request = 7;
    ParamsResponse params_response = 8;
  }
}

@@ -46,4 +49,13 @@ message LightBlockRequest {

message LightBlockResponse {
  tendermint.types.LightBlock light_block = 1;
}

message ParamsRequest {
  uint64 height = 1;
}

message ParamsResponse {
  uint64 height = 1;
  tendermint.types.ConsensusParams consensus_params = 2;
}
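
The new messages are wrapped in the statesync Message oneof shown above (fields 7 and 8) and marshaled with the generated gogoproto methods. A minimal round-trip sketch under the assumption that the generated package is imported from the go_package path declared in the proto (the ssproto alias is just for illustration):

package main

import (
	"fmt"

	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
)

func main() {
	// Wrap a params request for height 1 in the statesync Message envelope.
	msg := ssproto.Message{
		Sum: &ssproto.Message_ParamsRequest{
			ParamsRequest: &ssproto.ParamsRequest{Height: 1},
		},
	}

	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	// Decode on the receiving side and branch on the oneof member.
	var received ssproto.Message
	if err := received.Unmarshal(bz); err != nil {
		panic(err)
	}
	if req, ok := received.Sum.(*ssproto.Message_ParamsRequest); ok {
		fmt.Println("params requested for height", req.ParamsRequest.Height)
	}
}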