Mirror of https://github.com/tendermint/tendermint.git

Compare commits: p2p-accept...jasmina/44 (35 commits)
| SHA1 |
|---|
| 4af43afe6d |
| 6e125e3ea7 |
| 6252578abe |
| 3395affe75 |
| 3a476331f1 |
| 8b68022d16 |
| b1bb88f92f |
| 753413941f |
| 70680856d0 |
| 4b83be646c |
| 52bc079225 |
| a2b7df9516 |
| 9a8bcba943 |
| 8ddedd157d |
| 600f84dae6 |
| 993c90ce07 |
| 3654fc9fae |
| 2a31843f8d |
| 2cdd71a38f |
| d6cbb05105 |
| 4e6d38030c |
| abfc780202 |
| 18523e0927 |
| 38d36b59ec |
| e227b9ea07 |
| 1c86cec072 |
| 361d631eb2 |
| bf31f6fdfd |
| d22f4c458f |
| b4f422e413 |
| a6e0813be5 |
| 60eaa831af |
| 4ba3892b03 |
| 07fac4ccc0 |
| 09f522b249 |
@@ -66,6 +66,13 @@ type BlockRequest struct {
|
||||
PeerID types.NodeID
|
||||
}
|
||||
|
||||
// HeaderRequest requests the header of a block at a certain height. Used to cross-check
|
||||
// the validated blocks with witnesses
|
||||
type HeaderRequest struct {
|
||||
Height int64
|
||||
PeerID types.NodeID
|
||||
}
|
||||
|
||||
// BlockPool keeps track of the block sync peers, block requests and block responses.
|
||||
type BlockPool struct {
|
||||
service.BaseService
|
||||
@@ -76,7 +83,11 @@ type BlockPool struct {
|
||||
mtx sync.RWMutex
|
||||
// block requests
|
||||
requesters map[int64]*bpRequester
|
||||
height int64 // the lowest key in requesters.
|
||||
// witness requesters
|
||||
//TODO we ideally want more than one witness per height
|
||||
witnessRequesters map[int64]*witnessRequester
|
||||
|
||||
height int64 // the lowest key in requesters.
|
||||
// peers
|
||||
peers map[types.NodeID]*bpPeer
|
||||
maxPeerHeight int64 // the biggest reported height
|
||||
@@ -85,7 +96,10 @@ type BlockPool struct {
|
||||
numPending int32 // number of requests pending assignment or block response
|
||||
|
||||
requestsCh chan<- BlockRequest
|
||||
errorsCh chan<- peerError
|
||||
// TODO: We essentially request a header here but reuse the same message
|
||||
// type for now
|
||||
witnessRequestsCh chan<- HeaderRequest
|
||||
errorsCh chan<- peerError
|
||||
|
||||
startHeight int64
|
||||
lastHundredBlockTimeStamp time.Time
|
||||
@@ -99,18 +113,21 @@ func NewBlockPool(
|
||||
start int64,
|
||||
requestsCh chan<- BlockRequest,
|
||||
errorsCh chan<- peerError,
|
||||
witnessRequestCh chan<- HeaderRequest,
|
||||
) *BlockPool {
|
||||
|
||||
bp := &BlockPool{
|
||||
logger: logger,
|
||||
peers: make(map[types.NodeID]*bpPeer),
|
||||
requesters: make(map[int64]*bpRequester),
|
||||
height: start,
|
||||
startHeight: start,
|
||||
numPending: 0,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
lastSyncRate: 0,
|
||||
logger: logger,
|
||||
peers: make(map[types.NodeID]*bpPeer),
|
||||
requesters: make(map[int64]*bpRequester),
|
||||
witnessRequesters: make(map[int64]*witnessRequester),
|
||||
height: start,
|
||||
startHeight: start,
|
||||
numPending: 0,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
witnessRequestsCh: witnessRequestCh,
|
||||
lastSyncRate: 0,
|
||||
}
|
||||
bp.BaseService = *service.NewBaseService(logger, "BlockPool", bp)
|
||||
return bp
|
||||
@@ -218,7 +235,6 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
|
||||
}
|
||||
|
||||
// PopRequest pops the first block at pool.height.
|
||||
// It must have been validated by 'second'.Commit from PeekTwoBlocks().
|
||||
func (pool *BlockPool) PopRequest() {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
@@ -226,12 +242,14 @@ func (pool *BlockPool) PopRequest() {
|
||||
if r := pool.requesters[pool.height]; r != nil {
|
||||
r.Stop()
|
||||
delete(pool.requesters, pool.height)
|
||||
delete(pool.witnessRequesters, pool.height)
|
||||
pool.height++
|
||||
pool.lastAdvance = time.Now()
|
||||
|
||||
// the lastSyncRate will be updated every 100 blocks, it uses the adaptive filter
|
||||
// to smooth the block sync rate and the unit represents the number of blocks per second.
|
||||
if (pool.height-pool.startHeight)%100 == 0 {
|
||||
// -1 because the start height is assumed to be 1. @jmalicevic TODO: verify it is still OK when
|
||||
// starting height is not 1
|
||||
if (pool.height-pool.startHeight-1)%100 == 0 {
|
||||
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
|
||||
if pool.lastSyncRate == 0 {
|
||||
pool.lastSyncRate = newSyncRate
|
||||
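The hunk above shows how `newSyncRate` is computed every hundred blocks but cuts off before the smoothing branch itself. A minimal sketch of the adaptive filter the comment refers to, assuming an exponential moving average with 0.9/0.1 weights (the weights are an assumption for illustration, not taken from this diff):

```go
// updateSyncRate blends a newly measured blocks-per-second rate into the running
// estimate. Only the newSyncRate computation and the zero-value initialisation are
// visible in the hunk above; the weights here are illustrative.
func updateSyncRate(lastSyncRate, newSyncRate float64) float64 {
	if lastSyncRate == 0 {
		return newSyncRate
	}
	return 0.9*lastSyncRate + 0.1*newSyncRate
}
```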
@@ -262,6 +280,23 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
|
||||
return peerID
|
||||
}
|
||||
|
||||
func (pool *BlockPool) AddWitnessHeader(header *types.Header) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
requester := pool.witnessRequesters[header.Height]
|
||||
|
||||
if requester == nil {
|
||||
pool.logger.Error("peer sent us a header we didn't expect")
|
||||
return
|
||||
}
|
||||
requester.SetBlock(header)
|
||||
peer := pool.peers[requester.peerID]
|
||||
if peer != nil {
|
||||
peer.decrPending(header.ToProto().Size())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
|
||||
// TODO: ensure that blocks come in order for each peer.
|
||||
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
|
||||
@@ -289,9 +324,11 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi
|
||||
peer.decrPending(blockSize)
|
||||
}
|
||||
} else {
|
||||
|
||||
err := errors.New("requester is different or block already exists")
|
||||
pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height)
|
||||
pool.sendError(err, peerID)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -352,8 +389,14 @@ func (pool *BlockPool) removePeer(peerID types.NodeID) {
|
||||
if requester.getPeerID() == peerID {
|
||||
requester.redo(peerID)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for _, requester := range pool.witnessRequesters {
|
||||
if requester.getPeerID() == peerID {
|
||||
requester.redo(peerID)
|
||||
}
|
||||
}
|
||||
peer, ok := pool.peers[peerID]
|
||||
if ok {
|
||||
if peer.timeout != nil {
|
||||
@@ -381,6 +424,28 @@ func (pool *BlockPool) updateMaxPeerHeight() {
|
||||
pool.maxPeerHeight = max
|
||||
}
|
||||
|
||||
func (pool *BlockPool) pickIncrAvailableWitness(height int64) *bpPeer {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
for _, peer := range pool.peers {
|
||||
if peer.didTimeout {
|
||||
pool.removePeer(peer.id)
|
||||
continue
|
||||
}
|
||||
if peer.numPending >= maxPendingRequestsPerPeer {
|
||||
continue
|
||||
}
|
||||
if height < peer.base || height > peer.height || peer.id == pool.witnessRequesters[height].peerID {
|
||||
continue
|
||||
}
|
||||
peer.incrPending()
|
||||
|
||||
return peer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pick an available peer with the given height available.
|
||||
// If no peers are available, returns nil.
|
||||
func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer {
|
||||
@@ -399,6 +464,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer {
|
||||
continue
|
||||
}
|
||||
peer.incrPending()
|
||||
|
||||
return peer
|
||||
}
|
||||
return nil
|
||||
@@ -414,14 +480,20 @@ func (pool *BlockPool) makeNextRequester(ctx context.Context) {
|
||||
}
|
||||
|
||||
request := newBPRequester(pool.logger, pool, nextHeight)
|
||||
|
||||
witnessRequester := newWitnessRequester(pool.logger, pool, nextHeight)
|
||||
witnessRequester.excludePeerID = request.peerID
|
||||
pool.requesters[nextHeight] = request
|
||||
pool.witnessRequesters[nextHeight] = witnessRequester
|
||||
atomic.AddInt32(&pool.numPending, 1)
|
||||
|
||||
err := request.Start(ctx)
|
||||
if err != nil {
|
||||
request.logger.Error("error starting request", "err", err)
|
||||
}
|
||||
err = witnessRequester.Start(ctx)
|
||||
if err != nil {
|
||||
witnessRequester.logger.Error("error starting witness request", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) requestersLen() int64 {
|
||||
@@ -435,6 +507,13 @@ func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
|
||||
pool.requestsCh <- BlockRequest{height, peerID}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendWitnessRequest(height int64, peerID types.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
pool.witnessRequestsCh <- HeaderRequest{height, peerID}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
@@ -536,6 +615,129 @@ func (peer *bpPeer) onTimeout() {
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
type BlockResponse struct {
|
||||
block *types.Block
|
||||
commit *types.Commit
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
type witnessRequester struct {
|
||||
service.BaseService
|
||||
peerID types.NodeID
|
||||
header *types.Header
|
||||
height int64
|
||||
getHeaderCh chan struct{}
|
||||
redoCh chan types.NodeID
|
||||
mtx sync.Mutex
|
||||
// ID of peer we have already received this block from
|
||||
excludePeerID types.NodeID
|
||||
pool *BlockPool
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func newWitnessRequester(logger log.Logger, pool *BlockPool, height int64) *witnessRequester {
|
||||
wreq := &witnessRequester{
|
||||
logger: pool.logger,
|
||||
pool: pool,
|
||||
height: height,
|
||||
getHeaderCh: make(chan struct{}, 1),
|
||||
redoCh: make(chan types.NodeID),
|
||||
peerID: "",
|
||||
header: nil,
|
||||
}
|
||||
wreq.BaseService = *service.NewBaseService(logger, "witnessRequester", wreq)
|
||||
return wreq
|
||||
}
|
||||
|
||||
func (wreq *witnessRequester) SetBlock(header *types.Header) bool {
|
||||
wreq.mtx.Lock()
|
||||
if wreq.header != nil { //|| wreq.peerID != peerID {
|
||||
wreq.mtx.Unlock()
|
||||
return false
|
||||
}
|
||||
wreq.header = header
|
||||
wreq.mtx.Unlock()
|
||||
|
||||
select {
|
||||
case wreq.getHeaderCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (wreq *witnessRequester) OnStart(ctx context.Context) error {
|
||||
go wreq.requestRoutine(ctx)
|
||||
return nil
|
||||
}
|
||||
func (*witnessRequester) OnStop() {}
|
||||
|
||||
func (wreq *witnessRequester) getPeerID() types.NodeID {
|
||||
wreq.mtx.Lock()
|
||||
defer wreq.mtx.Unlock()
|
||||
return wreq.peerID
|
||||
}
|
||||
|
||||
func (wreq *witnessRequester) requestRoutine(ctx context.Context) {
|
||||
OUTER_LOOP:
|
||||
for {
|
||||
// Pick a peer to send request to.
|
||||
var peer *bpPeer
|
||||
PICK_PEER_LOOP:
|
||||
for {
|
||||
if !wreq.IsRunning() || !wreq.pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
peer = wreq.pool.pickIncrAvailableWitness(wreq.height)
|
||||
if peer == nil {
|
||||
time.Sleep(requestInterval * time.Millisecond)
|
||||
continue PICK_PEER_LOOP
|
||||
}
|
||||
break PICK_PEER_LOOP
|
||||
}
|
||||
wreq.mtx.Lock()
|
||||
wreq.peerID = peer.id
|
||||
wreq.mtx.Unlock()
|
||||
|
||||
// Send request and wait.
|
||||
wreq.pool.sendWitnessRequest(wreq.height, peer.id)
|
||||
WAIT_LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case peerID := <-wreq.redoCh:
|
||||
if peerID == wreq.peerID {
|
||||
wreq.reset()
|
||||
continue OUTER_LOOP
|
||||
} else {
|
||||
continue WAIT_LOOP
|
||||
}
|
||||
case <-wreq.getHeaderCh:
|
||||
// We got a header!
|
||||
// Continue the for-loop and wait til Quit.
|
||||
continue WAIT_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (wreq *witnessRequester) redo(peerID types.NodeID) {
|
||||
select {
|
||||
case wreq.redoCh <- peerID:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (wreq *witnessRequester) reset() {
|
||||
wreq.mtx.Lock()
|
||||
defer wreq.mtx.Unlock()
|
||||
wreq.peerID = ""
|
||||
wreq.header = nil
|
||||
}
|
||||
|
||||
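For orientation, this is how a single witness header request flows through the pool and the reactor in this change (a summary sketch pieced together from the surrounding hunks, not a literal excerpt):

```go
// makeNextRequester        -> newWitnessRequester(pool.logger, pool, nextHeight)
// witnessRequester routine -> pool.pickIncrAvailableWitness(height)      // skips the peer already recorded for this height
//                          -> pool.sendWitnessRequest(height, peer.id)   // pushes a HeaderRequest onto witnessRequestsCh
// reactor requestRoutine   -> drains witnessCh and sends bcproto.HeaderRequest over the block sync channel
// responding peer          -> sendHeaderToPeer answers with bcproto.HeaderResponse
// reactor handleMessage    -> pool.AddWitnessHeader(&header)
// pool.AddWitnessHeader    -> witnessRequesters[height].SetBlock(header) // header kept for cross-checking the synced block
```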
//-------------------------------------
|
||||
|
||||
type bpRequester struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
@@ -86,7 +86,8 @@ func TestBlockPoolBasic(t *testing.T) {
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
requestsCh := make(chan BlockRequest, 1000)
|
||||
pool := NewBlockPool(log.NewNopLogger(), start, requestsCh, errorsCh)
|
||||
witnessRequestCh := make(chan HeaderRequest, 1000)
|
||||
pool := NewBlockPool(log.NewNopLogger(), start, requestsCh, errorsCh, witnessRequestCh)
|
||||
|
||||
if err := pool.Start(ctx); err != nil {
|
||||
t.Error(err)
|
||||
@@ -144,7 +145,8 @@ func TestBlockPoolTimeout(t *testing.T) {
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
requestsCh := make(chan BlockRequest, 1000)
|
||||
pool := NewBlockPool(logger, start, requestsCh, errorsCh)
|
||||
witnessRequestCh := make(chan HeaderRequest, 1000)
|
||||
pool := NewBlockPool(logger, start, requestsCh, errorsCh, witnessRequestCh)
|
||||
err := pool.Start(ctx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@@ -206,8 +208,8 @@ func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
}
|
||||
requestsCh := make(chan BlockRequest)
|
||||
errorsCh := make(chan peerError)
|
||||
|
||||
pool := NewBlockPool(log.NewNopLogger(), 1, requestsCh, errorsCh)
|
||||
witnessRequestCh := make(chan HeaderRequest, 1000)
|
||||
pool := NewBlockPool(log.NewNopLogger(), 1, requestsCh, errorsCh, witnessRequestCh)
|
||||
err := pool.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { cancel(); pool.Wait() })
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -86,11 +87,14 @@ type Reactor struct {
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
witnessCh <-chan HeaderRequest
|
||||
|
||||
metrics *consensus.Metrics
|
||||
eventBus *eventbus.EventBus
|
||||
|
||||
syncStartTime time.Time
|
||||
|
||||
lastTrustedBlock *BlockResponse
|
||||
}
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
@@ -107,16 +111,17 @@ func NewReactor(
|
||||
eventBus *eventbus.EventBus,
|
||||
) *Reactor {
|
||||
r := &Reactor{
|
||||
logger: logger,
|
||||
stateStore: stateStore,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
consReactor: consReactor,
|
||||
blockSync: newAtomicBool(blockSync),
|
||||
chCreator: channelCreator,
|
||||
peerEvents: peerEvents,
|
||||
metrics: metrics,
|
||||
eventBus: eventBus,
|
||||
logger: logger,
|
||||
stateStore: stateStore,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
consReactor: consReactor,
|
||||
blockSync: newAtomicBool(blockSync),
|
||||
chCreator: channelCreator,
|
||||
peerEvents: peerEvents,
|
||||
metrics: metrics,
|
||||
eventBus: eventBus,
|
||||
lastTrustedBlock: nil,
|
||||
}
|
||||
|
||||
r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
|
||||
@@ -154,9 +159,11 @@ func (r *Reactor) OnStart(ctx context.Context) error {
|
||||
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
|
||||
r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh)
|
||||
witnessRequestCh := make(chan HeaderRequest, maxTotalRequesters)
|
||||
r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh, witnessRequestCh)
|
||||
r.requestsCh = requestsCh
|
||||
r.errorsCh = errorsCh
|
||||
r.witnessCh = witnessRequestCh
|
||||
|
||||
if r.blockSync.IsSet() {
|
||||
if err := r.pool.Start(ctx); err != nil {
|
||||
@@ -183,18 +190,33 @@ func (r *Reactor) OnStop() {
|
||||
|
||||
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
|
||||
// Otherwise, we'll respond saying we do not have it.
|
||||
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
func (r *Reactor) sendHeaderToPeer(ctx context.Context, msg *bcproto.HeaderRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
|
||||
block := r.store.LoadBlockProto(msg.Height)
|
||||
|
||||
if block != nil {
|
||||
blockProto, err := block.ToProto()
|
||||
if err != nil {
|
||||
r.logger.Error("failed to convert msg to protobuf", "err", err)
|
||||
return err
|
||||
}
|
||||
header := &block.Header
|
||||
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{Block: blockProto},
|
||||
Message: &bcproto.HeaderResponse{Header: header},
|
||||
})
|
||||
}
|
||||
r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
|
||||
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
})
|
||||
}
|
||||
|
||||
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
|
||||
// Otherwise, we'll respond saying we do not have it.
|
||||
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
|
||||
block := r.store.LoadBlockProto(msg.Height)
|
||||
if block != nil {
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{Block: block},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -226,19 +248,26 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo
|
||||
switch envelope.ChannelID {
|
||||
case BlockSyncChannel:
|
||||
switch msg := envelope.Message.(type) {
|
||||
case *bcproto.HeaderRequest:
|
||||
return r.sendHeaderToPeer(ctx, msg, envelope.From, blockSyncCh)
|
||||
case *bcproto.HeaderResponse:
|
||||
header, err := types.HeaderFromProto(msg.Header)
|
||||
if err != nil {
|
||||
r.logger.Error("faled to convert header from proto", "err", err)
|
||||
return err
|
||||
}
|
||||
r.pool.AddWitnessHeader(&header)
|
||||
|
||||
case *bcproto.BlockRequest:
|
||||
return r.respondToPeer(ctx, msg, envelope.From, blockSyncCh)
|
||||
case *bcproto.BlockResponse:
|
||||
block, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
r.logger.Error("failed to convert block from proto",
|
||||
"peer", envelope.From,
|
||||
"err", err)
|
||||
r.logger.Error("failed to convert block from proto", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
r.pool.AddBlock(envelope.From, block, block.Size())
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
return blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: envelope.From,
|
||||
@@ -377,6 +406,18 @@ func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case wreq := <-r.witnessCh:
|
||||
if err := blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: wreq.PeerID,
|
||||
Message: &bcproto.HeaderRequest{Height: wreq.Height},
|
||||
}); err != nil {
|
||||
if err := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: wreq.PeerID,
|
||||
Err: err,
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
case request := <-r.requestsCh:
|
||||
if err := blockSyncCh.Send(ctx, p2p.Envelope{
|
||||
To: request.PeerID,
|
||||
@@ -406,6 +447,19 @@ func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel)
|
||||
}
|
||||
}
|
||||
}
|
||||
func (r *Reactor) verifyWithWitnesses(newBlock *types.Block) error {
|
||||
if r.pool.witnessRequesters[newBlock.Height] != nil {
|
||||
witnessHeader := r.pool.witnessRequesters[newBlock.Height].header
|
||||
if witnessHeader == nil {
|
||||
r.pool.witnessRequestsCh <- HeaderRequest{Height: newBlock.Height, PeerID: r.pool.witnessRequesters[newBlock.Height].peerID}
|
||||
}
|
||||
if !bytes.Equal(witnessHeader.Hash(), newBlock.Hash()) {
|
||||
r.logger.Error("hashes does not match with witness header")
|
||||
return errors.New("header not matching the header provided by the witness")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// poolRoutine handles messages from the poolReactor telling the reactor what to
|
||||
// do.
|
||||
@@ -417,9 +471,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
|
||||
|
||||
blocksSynced = uint64(0)
|
||||
|
||||
chainID = r.initialState.ChainID
|
||||
state = r.initialState
|
||||
state = r.initialState
|
||||
|
||||
lastHundred = time.Now()
|
||||
lastRate = 0.0
|
||||
@@ -489,86 +541,147 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
//
|
||||
// TODO: Uncouple from request routine.
|
||||
|
||||
// see if there are any blocks to sync
|
||||
first, second := r.pool.PeekTwoBlocks()
|
||||
if first == nil || second == nil {
|
||||
// we need both to sync the first block
|
||||
newBlock, verifyBlock := r.pool.PeekTwoBlocks()
|
||||
|
||||
if newBlock == nil || verifyBlock == nil {
|
||||
continue
|
||||
} else {
|
||||
// try again quickly next loop
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err != nil {
|
||||
r.logger.Error("failed to make ",
|
||||
"height", first.Height,
|
||||
"err", err.Error())
|
||||
newBlockParts, err2 := newBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err2 != nil {
|
||||
r.logger.Error("failed to make block at ",
|
||||
"height", newBlock.Height,
|
||||
"err", err2.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
firstPartSetHeader = firstParts.Header()
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
newBlockPartSetHeader = newBlockParts.Header()
|
||||
newBlockID = types.BlockID{Hash: newBlock.Hash(), PartSetHeader: newBlockPartSetHeader}
|
||||
)
|
||||
|
||||
// Finally, verify the first block using the second's commit.
|
||||
//
|
||||
// NOTE: We can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if r.lastTrustedBlock != nil {
|
||||
err := VerifyNextBlock(newBlock, newBlockID, verifyBlock, r.lastTrustedBlock.block, r.lastTrustedBlock.commit, state.NextValidators)
|
||||
|
||||
if err == nil {
|
||||
// validate the block before we persist it
|
||||
err = r.blockExec.ValidateBlock(ctx, state, first)
|
||||
}
|
||||
if err != nil {
|
||||
r.logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", verifyBlock.LastCommit,
|
||||
"block_id", newBlock,
|
||||
"height", newBlock.Height,
|
||||
)
|
||||
switch err.(type) {
|
||||
case ErrBlockIDDiff:
|
||||
case ErrValidationFailed:
|
||||
peerID := r.pool.RedoRequest(r.lastTrustedBlock.block.Height + 1)
|
||||
if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: peerID,
|
||||
Err: errors.New("invalid block for verification"),
|
||||
}); serr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If either of the checks failed we log the error and request for a new block
|
||||
// at that height
|
||||
if err != nil {
|
||||
r.logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", second.LastCommit,
|
||||
"block_id", firstID,
|
||||
"height", first.Height,
|
||||
)
|
||||
case ErrInvalidVerifyBlock:
|
||||
r.logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", verifyBlock.LastCommit,
|
||||
"verify_block_id", newBlockID,
|
||||
"verify_block_height", newBlock.Height,
|
||||
)
|
||||
peerID := r.pool.RedoRequest(r.lastTrustedBlock.block.Height + 2)
|
||||
if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: peerID,
|
||||
Err: err,
|
||||
}); serr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: We've already removed the peer's request, but we still need
|
||||
// to clean up the rest.
|
||||
peerID := r.pool.RedoRequest(first.Height)
|
||||
if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: peerID,
|
||||
Err: err,
|
||||
}); serr != nil {
|
||||
return
|
||||
}
|
||||
continue // was return previously
|
||||
}
|
||||
|
||||
peerID2 := r.pool.RedoRequest(second.Height)
|
||||
if peerID2 != peerID {
|
||||
// // Finally, verify the first block using the second's commit.
|
||||
// //
|
||||
// // NOTE: We can probably make this more efficient, but note that calling
|
||||
// // first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// // currently necessary.
|
||||
// err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
r.lastTrustedBlock.block = newBlock
|
||||
r.lastTrustedBlock.commit = verifyBlock.LastCommit
|
||||
} else {
|
||||
// we need to load the last block we trusted
|
||||
if r.initialState.LastBlockHeight != 0 {
|
||||
r.lastTrustedBlock = &BlockResponse{r.store.LoadBlock(r.initialState.LastBlockHeight), r.store.LoadBlockCommit(r.initialState.LastBlockHeight)}
|
||||
if r.lastTrustedBlock == nil {
|
||||
panic("Failed to load last trusted block")
|
||||
}
|
||||
}
|
||||
oldHash := r.initialState.Validators.Hash()
|
||||
if !bytes.Equal(oldHash, newBlock.ValidatorsHash) {
|
||||
r.logger.Error("The validator set provided by the new block does not match the expected validator set",
|
||||
"initial hash ", r.initialState.Validators.Hash(),
|
||||
"new hash ", newBlock.ValidatorsHash,
|
||||
)
|
||||
|
||||
peerID := r.pool.RedoRequest(r.lastTrustedBlock.block.Height + 1)
|
||||
if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: peerID2,
|
||||
Err: err,
|
||||
NodeID: peerID,
|
||||
Err: ErrValidationFailed{},
|
||||
}); serr != nil {
|
||||
return
|
||||
}
|
||||
continue // was return previously
|
||||
}
|
||||
return
|
||||
r.lastTrustedBlock = &BlockResponse{}
|
||||
}
|
||||
// if err := r.verifyWithWitnesses(newBlock); err != nil {
|
||||
// r.logger.Debug("witness verification failed")
|
||||
// }
|
||||
var err error
|
||||
// validate the block before we persist it
|
||||
err = r.blockExec.ValidateBlock(ctx, state, newBlock)
|
||||
if err != nil {
|
||||
r.logger.Error("The validator set provided by the new block does not match the expected validator set",
|
||||
"initial hash ", r.initialState.Validators.Hash(),
|
||||
"new hash ", newBlock.ValidatorsHash,
|
||||
)
|
||||
|
||||
peerID := r.pool.RedoRequest(r.lastTrustedBlock.block.Height + 1)
|
||||
if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
|
||||
NodeID: peerID,
|
||||
Err: ErrValidationFailed{},
|
||||
}); serr != nil {
|
||||
return
|
||||
}
|
||||
continue // was return previously
|
||||
}
|
||||
r.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
r.store.SaveBlock(newBlock, newBlockParts, verifyBlock.LastCommit)
|
||||
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first)
|
||||
state, err = r.blockExec.ApplyBlock(ctx, state, newBlockID, newBlock)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", newBlock.Height, newBlock.Hash(), err))
|
||||
}
|
||||
|
||||
r.metrics.RecordConsMetrics(first)
|
||||
if err != nil {
|
||||
// TODO: This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", newBlock.Height, newBlock.Hash(), err))
|
||||
}
|
||||
|
||||
r.lastTrustedBlock.block = newBlock
|
||||
r.lastTrustedBlock.commit = r.store.LoadSeenCommit()
|
||||
|
||||
r.metrics.RecordConsMetrics(newBlock)
|
||||
|
||||
blocksSynced++
|
||||
|
||||
@@ -583,6 +696,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
|
||||
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func setup(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVal types.PrivValidator,
|
||||
privValArray []types.PrivValidator,
|
||||
maxBlockHeights []int64,
|
||||
) *reactorTestSuite {
|
||||
t.Helper()
|
||||
@@ -75,10 +75,14 @@ func setup(
|
||||
chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)}
|
||||
rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc)
|
||||
|
||||
i := 0
|
||||
for nodeID := range rts.network.Nodes {
|
||||
rts.addNode(ctx, t, nodeID, genDoc, privVal, maxBlockHeights[i])
|
||||
i++
|
||||
if maxBlockHeights[1] != 0 {
|
||||
rts.addMultipleNodes(ctx, t, rts.network.NodeIDs(), genDoc, privValArray, maxBlockHeights, 0)
|
||||
} else {
|
||||
i := 0
|
||||
for nodeID := range rts.network.Nodes {
|
||||
rts.addNode(ctx, t, nodeID, genDoc, privValArray, maxBlockHeights[i])
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
@@ -97,12 +101,151 @@ func setup(
|
||||
return rts
|
||||
}
|
||||
|
||||
// We add multiple nodes with varying initial heights
|
||||
// Allows us to test whether block sync works when a node
|
||||
// has previous state
|
||||
// maxBlockHeightPerNode - the heights for which the node already has state
|
||||
// maxBlockHeightIdx - the index of the node with maximum height
|
||||
func (rts *reactorTestSuite) addMultipleNodes(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
nodeIDs []types.NodeID,
|
||||
genDoc *types.GenesisDoc,
|
||||
privValArray []types.PrivValidator,
|
||||
maxBlockHeightPerNode []int64,
|
||||
maxBlockHeightIdx int64,
|
||||
|
||||
) {
|
||||
t.Helper()
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
blockDB := make([]*dbm.MemDB, len(nodeIDs))
|
||||
stateDB := make([]*dbm.MemDB, len(nodeIDs))
|
||||
blockExecutors := make([]*sm.BlockExecutor, len(nodeIDs))
|
||||
blockStores := make([]*store.BlockStore, len(nodeIDs))
|
||||
stateStores := make([]sm.Store, len(nodeIDs))
|
||||
|
||||
state, err := sm.MakeGenesisState(genDoc)
|
||||
require.NoError(t, err)
|
||||
|
||||
for idx, nodeID := range nodeIDs {
|
||||
rts.nodes = append(rts.nodes, nodeID)
|
||||
rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics())
|
||||
require.NoError(t, rts.app[nodeID].Start(ctx))
|
||||
stateDB[idx] = dbm.NewMemDB()
|
||||
stateStores[idx] = sm.NewStore(stateDB[idx])
|
||||
|
||||
blockDB[idx] = dbm.NewMemDB()
|
||||
blockStores[idx] = store.NewBlockStore(blockDB[idx])
|
||||
|
||||
require.NoError(t, stateStores[idx].Save(state))
|
||||
mp := &mpmocks.Mempool{}
|
||||
mp.On("Lock").Return()
|
||||
mp.On("Unlock").Return()
|
||||
mp.On("FlushAppConn", mock.Anything).Return(nil)
|
||||
mp.On("Update",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
eventbus := eventbus.NewDefault(logger)
|
||||
require.NoError(t, eventbus.Start(ctx))
|
||||
|
||||
blockExecutors[idx] = sm.NewBlockExecutor(stateStores[idx],
|
||||
log.NewNopLogger(),
|
||||
rts.app[nodeID],
|
||||
mp,
|
||||
sm.EmptyEvidencePool{},
|
||||
blockStores[idx],
|
||||
eventbus,
|
||||
sm.NopMetrics(),
|
||||
)
|
||||
}
|
||||
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeightPerNode[maxBlockHeightIdx]; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
|
||||
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStores[maxBlockHeightIdx].LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStores[maxBlockHeightIdx].LoadBlock(blockHeight - 1)
|
||||
|
||||
commitSigs := make([]types.CommitSig, len(privValArray))
|
||||
votes := make([]types.Vote, len(privValArray))
|
||||
for i, val := range privValArray {
|
||||
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
val,
|
||||
lastBlock.Header.ChainID, 0,
|
||||
lastBlock.Header.Height, 0, 2,
|
||||
lastBlockMeta.BlockID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
votes[i] = *vote
|
||||
commitSigs[i] = vote.CommitSig()
|
||||
|
||||
}
|
||||
lastCommit = types.NewCommit(
|
||||
votes[0].Height,
|
||||
votes[0].Round,
|
||||
lastBlockMeta.BlockID,
|
||||
commitSigs,
|
||||
)
|
||||
|
||||
}
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
|
||||
thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
for idx := range nodeIDs {
|
||||
|
||||
if blockHeight <= maxBlockHeightPerNode[idx] {
|
||||
lastState, err := stateStores[idx].Load()
|
||||
require.NoError(t, err)
|
||||
state, err = blockExecutors[idx].ApplyBlock(ctx, lastState, blockID, thisBlock)
|
||||
require.NoError(t, err)
|
||||
blockStores[idx].SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for idx, nodeID := range nodeIDs {
|
||||
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
|
||||
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
|
||||
rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])
|
||||
|
||||
chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
|
||||
return rts.blockSyncChannels[nodeID], nil
|
||||
}
|
||||
rts.reactors[nodeID] = NewReactor(
|
||||
rts.logger.With("nodeID", nodeID),
|
||||
stateStores[idx],
|
||||
blockExecutors[idx],
|
||||
blockStores[idx],
|
||||
nil,
|
||||
chCreator,
|
||||
func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] },
|
||||
rts.blockSync,
|
||||
consensus.NopMetrics(),
|
||||
nil, // eventbus, can be nil
|
||||
)
|
||||
|
||||
require.NoError(t, rts.reactors[nodeID].Start(ctx))
|
||||
require.True(t, rts.reactors[nodeID].IsRunning())
|
||||
}
|
||||
}
|
||||
|
||||
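For reference, `setup` above drives this helper; a call shaped like the `TestReactor_NonGenesisSync` case later in this file looks like the sketch below (heights are illustrative):

```go
// Node 0 starts with 101 blocks, node 1 with 2, node 2 from genesis;
// index 0 marks the node holding the maximum height that drives block creation.
rts.addMultipleNodes(ctx, t, rts.network.NodeIDs(), genDoc, privVals, []int64{101, 2, 0}, 0)
```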
func (rts *reactorTestSuite) addNode(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
nodeID types.NodeID,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVal types.PrivValidator,
|
||||
privValArray []types.PrivValidator,
|
||||
maxBlockHeight int64,
|
||||
) {
|
||||
t.Helper()
|
||||
@@ -154,21 +297,28 @@ func (rts *reactorTestSuite) addNode(
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
privVal,
|
||||
lastBlock.Header.ChainID, 0,
|
||||
lastBlock.Header.Height, 0, 2,
|
||||
lastBlockMeta.BlockID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
commitSigs := make([]types.CommitSig, len(privValArray))
|
||||
votes := make([]types.Vote, len(privValArray))
|
||||
for i, val := range privValArray {
|
||||
vote, err := factory.MakeVote(
|
||||
ctx,
|
||||
val,
|
||||
lastBlock.Header.ChainID, 0,
|
||||
lastBlock.Header.Height, 0, 2,
|
||||
lastBlockMeta.BlockID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
votes[i] = *vote
|
||||
commitSigs[i] = vote.CommitSig()
|
||||
}
|
||||
lastCommit = types.NewCommit(
|
||||
vote.Height,
|
||||
vote.Round,
|
||||
votes[0].Height,
|
||||
votes[0].Round,
|
||||
lastBlockMeta.BlockID,
|
||||
[]types.CommitSig{vote.CommitSig()},
|
||||
commitSigs,
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
|
||||
@@ -227,7 +377,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
|
||||
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
|
||||
maxBlockHeight := int64(64)
|
||||
|
||||
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})
|
||||
rts := setup(ctx, t, genDoc, privVals, []int64{maxBlockHeight, 0})
|
||||
|
||||
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
|
||||
|
||||
@@ -255,6 +405,51 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
|
||||
rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(ctx, rts.nodes[0])
|
||||
}
|
||||
|
||||
//@jmalicevic ToDO
|
||||
// a) Add tests that verify whether faulty peer is properly detected
|
||||
// 1. block at H + 1 is faulty
|
||||
// 2. block at H + 2 is faulty (the validator set does not match)
|
||||
// b) Add test to verify we replace a peer with a new one if we detect misbehavior
|
||||
func TestReactor_NonGenesisSync(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
|
||||
valSet, privVals := factory.ValidatorSet(ctx, t, 4, 30)
|
||||
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
|
||||
maxBlockHeight := int64(101)
|
||||
|
||||
rts := setup(ctx, t, genDoc, privVals, []int64{maxBlockHeight, 2, 0}) //50, 4, 0})
|
||||
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
|
||||
rts.start(ctx, t)
|
||||
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool {
|
||||
matching := true
|
||||
for idx := range rts.nodes {
|
||||
if idx == 0 {
|
||||
continue
|
||||
}
|
||||
matching = matching && rts.reactors[rts.nodes[idx]].GetRemainingSyncTime() > time.Nanosecond &&
|
||||
rts.reactors[rts.nodes[idx]].pool.getLastSyncRate() > 0.001
|
||||
|
||||
if !matching {
|
||||
height, _, _ := rts.reactors[rts.nodes[idx]].pool.GetStatus()
|
||||
t.Logf("%d %d %s %f", height, idx, rts.reactors[rts.nodes[idx]].GetRemainingSyncTime(), rts.reactors[rts.nodes[idx]].pool.getLastSyncRate())
|
||||
}
|
||||
}
|
||||
return matching
|
||||
},
|
||||
10*time.Second,
|
||||
10*time.Millisecond,
|
||||
"expected node to be partially synced",
|
||||
)
|
||||
}
|
||||
|
||||
func TestReactor_SyncTime(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -263,11 +458,11 @@ func TestReactor_SyncTime(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
|
||||
valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
|
||||
valSet, privVals := factory.ValidatorSet(ctx, t, 4, 30)
|
||||
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
|
||||
maxBlockHeight := int64(101)
|
||||
|
||||
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})
|
||||
rts := setup(ctx, t, genDoc, privVals, []int64{maxBlockHeight, 0})
|
||||
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
|
||||
rts.start(ctx, t)
|
||||
|
||||
@@ -295,7 +490,7 @@ func TestReactor_NoBlockResponse(t *testing.T) {
|
||||
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
|
||||
maxBlockHeight := int64(65)
|
||||
|
||||
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})
|
||||
rts := setup(ctx, t, genDoc, privVals, []int64{maxBlockHeight, 0})
|
||||
|
||||
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
|
||||
|
||||
@@ -347,7 +542,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
|
||||
valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
|
||||
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
|
||||
|
||||
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0})
|
||||
rts := setup(ctx, t, genDoc, privVals, []int64{maxBlockHeight, 0, 0, 0, 0})
|
||||
|
||||
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
|
||||
|
||||
@@ -385,7 +580,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
|
||||
MaxPeers: uint16(len(rts.nodes) + 1),
|
||||
MaxConnected: uint16(len(rts.nodes) + 1),
|
||||
})
|
||||
rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight)
|
||||
rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals, maxBlockHeight)
|
||||
|
||||
// add a fake peer just so we do not wait for the consensus ticker to timeout
|
||||
rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10)
|
||||
|
||||
112
internal/blocksync/verify.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/light"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func VerifyAdjacent(
|
||||
trustedHeader *types.SignedHeader, // height=X
|
||||
untrustedHeader *types.SignedHeader, // height=X+1
|
||||
untrustedVals *types.ValidatorSet, // height=X+1
|
||||
) error {
|
||||
|
||||
if len(trustedHeader.NextValidatorsHash) == 0 {
|
||||
return errors.New("next validators hash in trusted header is empty")
|
||||
}
|
||||
|
||||
if untrustedHeader.Height != trustedHeader.Height+1 {
|
||||
return errors.New("headers must be adjacent in height")
|
||||
}
|
||||
|
||||
if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil {
|
||||
return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err)
|
||||
}
|
||||
|
||||
if untrustedHeader.Height <= trustedHeader.Height {
|
||||
return fmt.Errorf("expected new header height %d to be greater than one of old header %d",
|
||||
untrustedHeader.Height,
|
||||
trustedHeader.Height)
|
||||
}
|
||||
|
||||
if !untrustedHeader.Time.After(trustedHeader.Time) {
|
||||
return fmt.Errorf("expected new header time %v to be after old header time %v",
|
||||
untrustedHeader.Time,
|
||||
trustedHeader.Time)
|
||||
}
|
||||
|
||||
if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) {
|
||||
return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d",
|
||||
untrustedHeader.ValidatorsHash,
|
||||
untrustedVals.Hash(),
|
||||
untrustedHeader.Height,
|
||||
)
|
||||
}
|
||||
|
||||
// Check the validator hashes are the same
|
||||
if !bytes.Equal(untrustedHeader.ValidatorsHash, trustedHeader.NextValidatorsHash) {
|
||||
err := fmt.Errorf("expected old header's next validators (%X) to match those from new header (%X)",
|
||||
trustedHeader.NextValidatorsHash,
|
||||
untrustedHeader.ValidatorsHash,
|
||||
)
|
||||
return light.ErrInvalidHeader{Reason: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ErrBlockIDDiff struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrBlockIDDiff) Error() string {
|
||||
return "block ID in lastCommit of new block is not matching trusted block ID"
|
||||
}
|
||||
|
||||
type ErrInvalidVerifyBlock struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrInvalidVerifyBlock) Error() string {
|
||||
return "last commit of invalid block used to verify new block"
|
||||
}
|
||||
|
||||
type ErrValidationFailed struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrValidationFailed) Error() string {
|
||||
return "failed to verify next block"
|
||||
}
|
||||
|
||||
func VerifyNextBlock(newBlock *types.Block, newBlockID types.BlockID, verifyBlock *types.Block, trustedBlock *types.Block,
|
||||
trustedCommit *types.Commit, validators *types.ValidatorSet) error {
|
||||
|
||||
// If the blockID in LastCommit of NewBlock does not match the trusted block
|
||||
// we can assume NewBlock is not correct
|
||||
if trustedCommit != nil {
|
||||
if !(newBlock.LastCommit.BlockID.Equals(trustedCommit.BlockID)) {
|
||||
return ErrBlockIDDiff{}
|
||||
}
|
||||
}
|
||||
|
||||
// Todo: Verify verifyBlock.LastCommit validators against state.NextValidators
|
||||
// If they do not match, need a new verifyBlock
|
||||
if err := validators.VerifyCommitLight(trustedBlock.ChainID, newBlockID, newBlock.Height, verifyBlock.LastCommit); err != nil {
|
||||
return ErrInvalidVerifyBlock{Reason: err}
|
||||
}
|
||||
|
||||
// Verify NewBlock using the validator set obtained after applying the last block
|
||||
// Note: VerifyAdjacent in the LightClient relies on a trusting period which is not applicable here
|
||||
// TODO: We need witness verification here as well, and backwards verification from a state where we can trust the validators
|
||||
if err := VerifyAdjacent(&types.SignedHeader{Header: &trustedBlock.Header, Commit: trustedCommit},
|
||||
&types.SignedHeader{Header: &newBlock.Header, Commit: verifyBlock.LastCommit}, validators); err != nil {
|
||||
return ErrValidationFailed{Reason: err}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
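The reactor below dispatches on these error types to decide which height to re-request. A compact sketch of that dispatch, assuming the trusted block sits at height H (the helper name is illustrative and not part of this change):

```go
// classifyVerifyError maps a VerifyNextBlock error to the height to re-request:
// ErrBlockIDDiff and ErrValidationFailed implicate the candidate block at H+1,
// while ErrInvalidVerifyBlock implicates the block at H+2 whose LastCommit was
// used for verification.
func classifyVerifyError(err error, trustedHeight int64) (redoHeight int64, ok bool) {
	switch err.(type) {
	case nil:
		return 0, true
	case ErrBlockIDDiff, ErrValidationFailed:
		return trustedHeight + 1, false
	case ErrInvalidVerifyBlock:
		return trustedHeight + 2, false
	default:
		return trustedHeight + 1, false
	}
}
```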
@@ -126,9 +126,9 @@ func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadBlock returns the block with the given height.
|
||||
// LoadBlockProto returns the block with the given height in protobuf.
|
||||
// If no block is found for that height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
func (bs *BlockStore) LoadBlockProto(height int64) *tmproto.Block {
|
||||
var blockMeta = bs.LoadBlockMeta(height)
|
||||
if blockMeta == nil {
|
||||
return nil
|
||||
@@ -151,8 +151,17 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
// block. So, make sure meta is only saved after blocks are saved.
|
||||
panic(fmt.Errorf("error reading block: %w", err))
|
||||
}
|
||||
return pbb
|
||||
}
|
||||
|
||||
block, err := types.BlockFromProto(pbb)
|
||||
// LoadBlock returns the block with the given height.
|
||||
// If no block is found for that height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
blockProto := bs.LoadBlockProto(height)
|
||||
if blockProto == nil {
|
||||
return nil
|
||||
}
|
||||
block, err := types.BlockFromProto(blockProto)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto block: %w", err))
|
||||
}
|
||||
|
||||
@@ -159,6 +159,96 @@ func (m *BlockResponse) GetBlock() *types.Block {
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeaderRequest requests the header at a specific height to validate
|
||||
// against witnesses
|
||||
type HeaderRequest struct {
|
||||
Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeaderRequest) Reset() { *m = HeaderRequest{} }
|
||||
func (m *HeaderRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeaderRequest) ProtoMessage() {}
|
||||
func (*HeaderRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_19b397c236e0fa07, []int{3}
|
||||
}
|
||||
func (m *HeaderRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *HeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_HeaderRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *HeaderRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HeaderRequest.Merge(m, src)
|
||||
}
|
||||
func (m *HeaderRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *HeaderRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HeaderRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HeaderRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *HeaderRequest) GetHeight() int64 {
|
||||
if m != nil {
|
||||
return m.Height
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type HeaderResponse struct {
|
||||
Header *types.Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeaderResponse) Reset() { *m = HeaderResponse{} }
|
||||
func (m *HeaderResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeaderResponse) ProtoMessage() {}
|
||||
func (*HeaderResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_19b397c236e0fa07, []int{4}
|
||||
}
|
||||
func (m *HeaderResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *HeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_HeaderResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *HeaderResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HeaderResponse.Merge(m, src)
|
||||
}
|
||||
func (m *HeaderResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *HeaderResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HeaderResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HeaderResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *HeaderResponse) GetHeader() *types.Header {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StatusRequest requests the status of a peer.
|
||||
type StatusRequest struct {
|
||||
}
|
||||
@@ -167,7 +257,7 @@ func (m *StatusRequest) Reset() { *m = StatusRequest{} }
|
||||
func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatusRequest) ProtoMessage() {}
|
||||
func (*StatusRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_19b397c236e0fa07, []int{3}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{5}
|
||||
}
|
||||
func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -206,7 +296,7 @@ func (m *StatusResponse) Reset() { *m = StatusResponse{} }
|
||||
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatusResponse) ProtoMessage() {}
|
||||
func (*StatusResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_19b397c236e0fa07, []int{4}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{6}
|
||||
}
|
||||
func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -256,6 +346,8 @@ type Message struct {
|
||||
// *Message_BlockResponse
|
||||
// *Message_StatusRequest
|
||||
// *Message_StatusResponse
|
||||
// *Message_HeaderRequest
|
||||
// *Message_HeaderResponse
|
||||
Sum isMessage_Sum `protobuf_oneof:"sum"`
|
||||
}
|
||||
|
||||
@@ -263,7 +355,7 @@ func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_19b397c236e0fa07, []int{5}
|
||||
return fileDescriptor_19b397c236e0fa07, []int{7}
|
||||
}
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -313,12 +405,20 @@ type Message_StatusRequest struct {
|
||||
type Message_StatusResponse struct {
|
||||
StatusResponse *StatusResponse `protobuf:"bytes,5,opt,name=status_response,json=statusResponse,proto3,oneof" json:"status_response,omitempty"`
|
||||
}
|
||||
type Message_HeaderRequest struct {
|
||||
HeaderRequest *HeaderRequest `protobuf:"bytes,6,opt,name=header_request,json=headerRequest,proto3,oneof" json:"header_request,omitempty"`
|
||||
}
|
||||
type Message_HeaderResponse struct {
|
||||
HeaderResponse *HeaderResponse `protobuf:"bytes,7,opt,name=header_response,json=headerResponse,proto3,oneof" json:"header_response,omitempty"`
|
||||
}
|
||||
|
||||
func (*Message_BlockRequest) isMessage_Sum() {}
|
||||
func (*Message_NoBlockResponse) isMessage_Sum() {}
|
||||
func (*Message_BlockResponse) isMessage_Sum() {}
|
||||
func (*Message_StatusRequest) isMessage_Sum() {}
|
||||
func (*Message_StatusResponse) isMessage_Sum() {}
|
||||
func (*Message_HeaderRequest) isMessage_Sum() {}
|
||||
func (*Message_HeaderResponse) isMessage_Sum() {}
|
||||
|
||||
func (m *Message) GetSum() isMessage_Sum {
|
||||
if m != nil {
|
||||
@@ -362,6 +462,20 @@ func (m *Message) GetStatusResponse() *StatusResponse {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetHeaderRequest() *HeaderRequest {
|
||||
if x, ok := m.GetSum().(*Message_HeaderRequest); ok {
|
||||
return x.HeaderRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetHeaderResponse() *HeaderResponse {
|
||||
if x, ok := m.GetSum().(*Message_HeaderResponse); ok {
|
||||
return x.HeaderResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*Message) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
@@ -370,6 +484,8 @@ func (*Message) XXX_OneofWrappers() []interface{} {
|
||||
(*Message_BlockResponse)(nil),
|
||||
(*Message_StatusRequest)(nil),
|
||||
(*Message_StatusResponse)(nil),
|
||||
(*Message_HeaderRequest)(nil),
|
||||
(*Message_HeaderResponse)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
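A minimal sketch of wrapping the new messages into the blocksync `Message` oneof generated here (the height and the `header` value are illustrative):

```go
// Wrap a header request and a header response the same way the existing
// BlockRequest/BlockResponse wrappers are used; header is an assumed *types.Header proto value.
req := &Message{
	Sum: &Message_HeaderRequest{HeaderRequest: &HeaderRequest{Height: 42}},
}
resp := &Message{
	Sum: &Message_HeaderResponse{HeaderResponse: &HeaderResponse{Header: header}},
}
_, _ = req, resp
```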
@@ -377,6 +493,8 @@ func init() {
|
||||
proto.RegisterType((*BlockRequest)(nil), "tendermint.blocksync.BlockRequest")
|
||||
proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blocksync.NoBlockResponse")
|
||||
proto.RegisterType((*BlockResponse)(nil), "tendermint.blocksync.BlockResponse")
|
||||
proto.RegisterType((*HeaderRequest)(nil), "tendermint.blocksync.HeaderRequest")
|
||||
proto.RegisterType((*HeaderResponse)(nil), "tendermint.blocksync.HeaderResponse")
|
||||
proto.RegisterType((*StatusRequest)(nil), "tendermint.blocksync.StatusRequest")
|
||||
proto.RegisterType((*StatusResponse)(nil), "tendermint.blocksync.StatusResponse")
|
||||
proto.RegisterType((*Message)(nil), "tendermint.blocksync.Message")
|
||||
@@ -385,30 +503,35 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) }
|
||||
|
||||
var fileDescriptor_19b397c236e0fa07 = []byte{
// 368 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40,
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97,
0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14,
0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3,
0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64,
0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7,
0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a,
0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec,
0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19,
0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06,
0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b,
0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a,
0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0,
0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72,
0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78,
0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56,
0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3,
0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f,
0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd,
0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8,
0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4,
0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12,
0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00,
// 436 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xcd, 0x4e, 0xab, 0x40,
0x14, 0xc7, 0xe1, 0xf6, 0x2b, 0x39, 0x2d, 0x90, 0x4b, 0x6e, 0xee, 0x6d, 0x6e, 0x0c, 0x69, 0xf0,
0x7b, 0x21, 0x98, 0xba, 0xd4, 0xb8, 0x60, 0x55, 0x93, 0x6a, 0x13, 0x1a, 0x37, 0x6e, 0x1a, 0x68,
0x27, 0xa5, 0xd1, 0x42, 0x65, 0x86, 0x45, 0xdf, 0xc2, 0x07, 0xf2, 0x01, 0x5c, 0x76, 0xe9, 0xd2,
0xb4, 0x2f, 0x62, 0x98, 0x99, 0x52, 0x40, 0xc4, 0xdd, 0x70, 0xe6, 0x7f, 0xfe, 0xe7, 0x77, 0xe6,
0xcc, 0x00, 0x1d, 0x82, 0xfc, 0x09, 0x0a, 0xe7, 0x33, 0x9f, 0x98, 0xee, 0x53, 0x30, 0x7e, 0xc4,
0x4b, 0x7f, 0x6c, 0x92, 0xe5, 0x02, 0x61, 0x63, 0x11, 0x06, 0x24, 0x50, 0xff, 0xec, 0x14, 0x46,
0xa2, 0xf8, 0xbf, 0x97, 0xca, 0xa3, 0x6a, 0x96, 0xcd, 0x72, 0x0a, 0x76, 0x53, 0x8e, 0xfa, 0x11,
0xb4, 0xac, 0x58, 0x6c, 0xa3, 0xe7, 0x08, 0x61, 0xa2, 0xfe, 0x85, 0xba, 0x87, 0x66, 0x53, 0x8f,
0xb4, 0xc5, 0x8e, 0x78, 0x52, 0xb1, 0xf9, 0x97, 0x7e, 0x0a, 0xca, 0x5d, 0xc0, 0x95, 0x78, 0x11,
0xf8, 0x18, 0x7d, 0x2b, 0xbd, 0x06, 0x29, 0x2b, 0x3c, 0x83, 0x1a, 0x05, 0xa2, 0xba, 0x66, 0xf7,
0x9f, 0x91, 0xea, 0x82, 0xb1, 0x30, 0x3d, 0x53, 0xe9, 0xc7, 0x20, 0xf5, 0x90, 0x33, 0x41, 0xe1,
0x4f, 0x4c, 0x16, 0xc8, 0x5b, 0x21, 0xaf, 0x74, 0x1e, 0x2b, 0xe3, 0x08, 0x2f, 0xd5, 0xfe, 0x5a,
0x8a, 0x67, 0x70, 0x9d, 0xae, 0x80, 0x34, 0x24, 0x0e, 0x89, 0x30, 0x2f, 0xa6, 0x5f, 0x81, 0xbc,
0x0d, 0x94, 0xf7, 0xa9, 0xaa, 0x50, 0x75, 0x1d, 0x8c, 0xda, 0xbf, 0x68, 0x94, 0xae, 0xf5, 0xd7,
0x2a, 0x34, 0x6e, 0x11, 0xc6, 0xce, 0x14, 0xa9, 0x37, 0x20, 0xd1, 0x86, 0x46, 0x21, 0xb3, 0xe6,
0x4c, 0xba, 0x51, 0x34, 0x44, 0x23, 0x3d, 0x85, 0x9e, 0x60, 0xb7, 0xdc, 0xf4, 0x54, 0x86, 0xf0,
0xdb, 0x0f, 0x46, 0x5b, 0x37, 0xc6, 0x45, 0xeb, 0x36, 0xbb, 0x87, 0xc5, 0x76, 0xb9, 0x61, 0xf5,
0x04, 0x5b, 0xf1, 0x73, 0xf3, 0xeb, 0x83, 0x9c, 0x73, 0xac, 0x50, 0xc7, 0xfd, 0x52, 0xc0, 0xc4,
0x4f, 0x72, 0xf3, 0x6e, 0x98, 0x9e, 0x5b, 0xd2, 0x6e, 0xb5, 0xcc, 0x2d, 0x73, 0xe8, 0xb1, 0x1b,
0x4e, 0x07, 0xd4, 0x01, 0x28, 0x89, 0x1b, 0x87, 0xab, 0x51, 0xbb, 0x83, 0x72, 0xbb, 0x84, 0x4e,
0xc6, 0xd9, 0x21, 0xf6, 0x41, 0x66, 0x13, 0x4f, 0xf0, 0xea, 0x65, 0x78, 0x99, 0x0b, 0x18, 0xe3,
0x79, 0x99, 0x1b, 0x39, 0x00, 0x25, 0x71, 0xe3, 0x78, 0x8d, 0x32, 0xbc, 0xec, 0x35, 0x8d, 0xf1,
0xbc, 0x4c, 0xc4, 0xaa, 0x41, 0x05, 0x47, 0x73, 0xeb, 0xfe, 0x6d, 0xad, 0x89, 0xab, 0xb5, 0x26,
0x7e, 0xac, 0x35, 0xf1, 0x65, 0xa3, 0x09, 0xab, 0x8d, 0x26, 0xbc, 0x6f, 0x34, 0xe1, 0xe1, 0x72,
0x3a, 0x23, 0x5e, 0xe4, 0x1a, 0xe3, 0x60, 0x6e, 0xa6, 0x1f, 0xf4, 0x6e, 0x49, 0xdf, 0xb3, 0x59,
0xf4, 0x0b, 0x71, 0xeb, 0x74, 0xef, 0xe2, 0x33, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x95, 0x0f, 0xbe,
0x61, 0x04, 0x00, 0x00,
}

func (m *BlockRequest) Marshal() (dAtA []byte, err error) {
@@ -502,6 +625,69 @@ func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}

func (m *HeaderRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *HeaderRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *HeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Height != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}

func (m *HeaderResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *HeaderResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *HeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Header != nil {
{
size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}

func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -695,6 +881,48 @@ func (m *Message_StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error)
}
return len(dAtA) - i, nil
}
func (m *Message_HeaderRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Message_HeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.HeaderRequest != nil {
{
size, err := m.HeaderRequest.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
return len(dAtA) - i, nil
}
func (m *Message_HeaderResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Message_HeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.HeaderResponse != nil {
{
size, err := m.HeaderResponse.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
return len(dAtA) - i, nil
}
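Editor's note: the single-byte keys written by the two wrappers above (0x32 for the request, 0x3a for the response) are ordinary protobuf field tags, computed as (field_number << 3) | wire_type with wire type 2 for length-delimited fields; they match the new header_request = 6 and header_response = 7 entries of the Message oneof. A tiny standalone Go sketch (not part of the generated file) that checks the arithmetic:

package main

import "fmt"

func main() {
	// Tag byte = (field number << 3) | wire type; wire type 2 = length-delimited.
	const lengthDelimited = 2
	fmt.Printf("header_request tag:  %#x\n", 6<<3|lengthDelimited) // 0x32
	fmt.Printf("header_response tag: %#x\n", 7<<3|lengthDelimited) // 0x3a
}
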
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
@@ -743,6 +971,31 @@ func (m *BlockResponse) Size() (n int) {
return n
}

func (m *HeaderRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Height != 0 {
n += 1 + sovTypes(uint64(m.Height))
}
return n
}

func (m *HeaderResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}

func (m *StatusRequest) Size() (n int) {
if m == nil {
return 0
@@ -839,6 +1092,30 @@ func (m *Message_StatusResponse) Size() (n int) {
}
return n
}
func (m *Message_HeaderRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.HeaderRequest != nil {
l = m.HeaderRequest.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Message_HeaderResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.HeaderResponse != nil {
l = m.HeaderResponse.Size()
n += 1 + l + sovTypes(uint64(l))
}
return n
}

func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
@@ -1070,6 +1347,161 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *HeaderRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HeaderRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
}
m.Height = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Height |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *HeaderResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HeaderResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Header == nil {
m.Header = &types.Header{}
}
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *StatusRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -1412,6 +1844,76 @@ func (m *Message) Unmarshal(dAtA []byte) error {
}
m.Sum = &Message_StatusResponse{v}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HeaderRequest", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &HeaderRequest{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Sum = &Message_HeaderRequest{v}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HeaderResponse", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &HeaderResponse{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Sum = &Message_HeaderResponse{v}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])

@@ -4,6 +4,7 @@ package tendermint.blocksync;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync";

import "tendermint/types/block.proto";
import "tendermint/types/types.proto";

// BlockRequest requests a block for a specific height
message BlockRequest {
@@ -21,6 +22,16 @@ message BlockResponse {
tendermint.types.Block block = 1;
}

// HeaderRequest requests the header at a specific height to validate
// against witnesses
message HeaderRequest {
int64 height = 1;
}

message HeaderResponse {
tendermint.types.Header header = 1;
}

// StatusRequest requests the status of a peer.
message StatusRequest {}

@@ -37,5 +48,7 @@ message Message {
BlockResponse block_response = 3;
StatusRequest status_request = 4;
StatusResponse status_response = 5;
HeaderRequest header_request = 6;
HeaderResponse header_response = 7;
}
}

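Editor's note: a rough sketch of how a reactor might wrap the new header messages on the wire and dispatch on a reply, assuming the generated Go types shown in this diff and the gogoproto runtime; the import alias and variable names are illustrative, not part of the change set.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
)

func main() {
	// Send side: ask a witness for the header at height 10.
	req := &bcproto.Message{
		Sum: &bcproto.Message_HeaderRequest{
			HeaderRequest: &bcproto.HeaderRequest{Height: 10},
		},
	}
	bz, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Receive side: decode the envelope and switch on the oneof.
	var msg bcproto.Message
	if err := proto.Unmarshal(bz, &msg); err != nil {
		panic(err)
	}
	switch sum := msg.Sum.(type) {
	case *bcproto.Message_HeaderRequest:
		fmt.Println("peer wants the header at height", sum.HeaderRequest.Height)
	case *bcproto.Message_HeaderResponse:
		fmt.Println("got a header to cross-check:", sum.HeaderResponse.Header)
	}
}
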
@@ -289,7 +289,7 @@ title: Methods
|-------------------------|---------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------|
| max_tx_bytes | int64 | Currently configured maximum size in bytes taken by the modified transactions. | 1 |
| txs | repeated bytes | Preliminary list of transactions that have been picked as part of the block to propose. | 2 |
| local_last_commit | [ExtendedCommitInfo](#extendedcommitinfo) | Info about the last commit, obtained locally from Tendermint's data structures. | 3 |
| local_last_commit | [Info](#extendedcommitinfo) | Info about the last commit, obtained locally from Tendermint's data structures. | 3 |
| byzantine_validators | repeated [Misbehavior](#misbehavior) | List of information about validators that acted incorrectly. | 4 |
| height | int64 | The height of the block that will be proposed. | 5 |
| time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Timestamp of the block that will be proposed. | 6 |
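Editor's note: max_tx_bytes is described above as the maximum size taken by the modified transactions, i.e. a budget for the whole list rather than a per-transaction limit. A hedged helper (an illustrative sketch, not a Tendermint API) showing that accounting:

// trimTxs keeps transactions, in order, while their cumulative size stays
// within the max_tx_bytes budget; transactions that would overflow it are
// skipped. Purely illustrative.
func trimTxs(txs [][]byte, maxTxBytes int64) [][]byte {
	kept := make([][]byte, 0, len(txs))
	var total int64
	for _, tx := range txs {
		if total+int64(len(tx)) > maxTxBytes {
			continue // a smaller transaction later in the list may still fit
		}
		total += int64(len(tx))
		kept = append(kept, tx)
	}
	return kept
}
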
@@ -302,7 +302,7 @@ title: Methods
|-------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------|--------------|
| tx_records | repeated [TxRecord](#txrecord) | Possibly modified list of transactions that have been picked as part of the proposed block. | 2 |
| app_hash | bytes | The Merkle root hash of the application state. | 3 |
| tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 4 |
| tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 4 |
| validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 5 |
| consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 6 |

@@ -414,7 +414,7 @@ Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as propos
|-------------------------|--------------------------------------------------|-----------------------------------------------------------------------------------|--------------|
| status | [ProposalStatus](#proposalstatus) | `enum` that signals if the application finds the proposal valid. | 1 |
| app_hash | bytes | The Merkle root hash of the application state. | 2 |
| tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions. | 3 |
| tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions. | 3 |
| validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 4 |
| consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 5 |

@@ -582,7 +582,7 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou
| Name | Type | Description | Field Number |
|-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------|
| events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 |
| tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 2 |
| tx_results | repeated [ExecTxResult](#exectxresult) | List of structures containing the data resulting from executing the transactions | 2 |
| validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 |
| consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 4 |
| app_hash | bytes | The Merkle root hash of the application state. | 5 |