Compare commits

..

10 Commits

Author SHA1 Message Date
William Banfield
e812bf2f34 update mempool fuzz target in github action 2021-07-26 13:03:33 -04:00
William Banfield
85e82abba3 add jsonrpc test case 2021-07-26 12:58:47 -04:00
William Banfield
765b43a87c lint ++ 2021-07-26 12:58:08 -04:00
William Banfield
f2e52c3dfe remove multi element qualifier from handler slice check 2021-07-26 12:42:27 -04:00
William Banfield
d812e3c786 handle server output as array if possible 2021-07-26 12:41:06 -04:00
William Banfield
7889a64dd7 Merge branch 'master' into wb/fuzz-testdata-cases 2021-07-26 12:31:17 -04:00
William Banfield
ca5ddc83e3 test/fuzz: add default testdata for fuzz tests 2021-07-26 12:22:14 -04:00
William Banfield
9212ad7c7f fix for single element json array 2021-07-23 18:05:55 -04:00
William Banfield
0e22c88229 pr feedback 2021-07-23 16:40:51 -04:00
William Banfield
cbdf0072fc test/fuzz: add test to reproduce found fuzz errors 2021-07-23 12:24:33 -04:00
83 changed files with 954 additions and 1923 deletions

View File

@@ -14,7 +14,6 @@ jobs:
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
exempt-pr-labels: "S:wip"

View File

@@ -30,7 +30,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
@@ -72,8 +71,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
(@cmwaters)
- Blockchain Protocol
@@ -84,7 +81,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- Tooling
- [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
### FEATURES

View File

@@ -17,7 +17,7 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### Config Changes
* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
you have updated all the variables in your `config.toml` file.
@@ -29,11 +29,10 @@ This guide provides instructions for upgrading to specific versions of Tendermin
`Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
persistent peers, there's no guarantee that the node will remain connected with these peers.
* configuration values starting with `priv-validator-` have moved to the new
- configuration values starting with `priv-validator-` have moved to the new
`priv-validator` section, without the `priv-validator-` prefix.
* The fast sync process, as well as the blockchain package and service, have all
been renamed to block sync
* Fast Sync v2 has been deprecated, please use v0 to sync a node.
### CLI Changes

View File

@@ -1827,19 +1827,17 @@ func (m *ResponseBeginBlock) GetEvents() []Event {
}
type ResponseCheckTx struct {
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"`
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
// mempool_error is set by Tendermint.
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"`
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
}
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
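For context on the `mempool_error` field added above: a minimal, hypothetical client-side sketch of turning a `ResponseCheckTx` into a Go error, using only the fields visible in the struct (the `checkTxErr` helper is illustrative and not part of the ABCI API):

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// checkTxErr is a hypothetical helper: it surfaces either the
// Tendermint-set mempool_error or an application-level rejection.
func checkTxErr(res *abci.ResponseCheckTx) error {
	if res.MempoolError != "" {
		// Set by Tendermint when the tx could not be added to the mempool;
		// applications should never populate this field themselves.
		return fmt.Errorf("mempool error: %s", res.MempoolError)
	}
	if res.Code != 0 {
		return fmt.Errorf("check tx rejected (code %d, codespace %q): %s",
			res.Code, res.Codespace, res.Log)
	}
	return nil
}

func main() {
	res := &abci.ResponseCheckTx{MempoolError: "mempool is full"}
	fmt.Println(checkTxErr(res)) // mempool error: mempool is full
}
```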

View File

@@ -1,251 +0,0 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
// ReIndexEventCmd allows re-indexing the events within a given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or you want to replace the backend.
The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); and the
default end-height is 0, meaning the tool will re-index until the latest block height (inclusive). Users can omit
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start re-indexing from")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height to stop re-indexing at")
}
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}
eventSinks := []indexer.EventSink{}
for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, _, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}
if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}
if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}
return eventSinks, nil
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)
for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}
_ = batch.Add(&tr)
}
}
for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
}
return nil
}

View File

@@ -1,171 +0,0 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}
for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

View File

@@ -15,7 +15,6 @@ func main() {
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.ReIndexEventCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,

View File

@@ -29,8 +29,8 @@ const (
ModeValidator = "validator"
ModeSeed = "seed"
BlockSyncV0 = "v0"
BlockSyncV2 = "v2"
BlockchainV0 = "v0"
BlockchainV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
@@ -76,7 +76,7 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
BlockSync *BlockSyncConfig `mapstructure:"fastsync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
@@ -91,7 +91,7 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
FastSync: DefaultFastSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
@@ -114,7 +114,7 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
FastSync: TestFastSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
@@ -151,7 +151,7 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.BlockSync.ValidateBasic(); err != nil {
if err := cfg.FastSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [fastsync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
@@ -197,7 +197,6 @@ type BaseConfig struct { //nolint: maligned
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
// TODO: This should be moved to the blocksync config
FastSyncMode bool `mapstructure:"fast-sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
@@ -912,7 +911,7 @@ func DefaultStateSyncConfig() *StateSyncConfig {
}
}
// TestStateSyncConfig returns a default configuration for the state sync service
// TestFastSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
@@ -968,33 +967,34 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
}
//-----------------------------------------------------------------------------
// FastSyncConfig
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
type BlockSyncConfig struct {
// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
Version string `mapstructure:"version"`
}
// DefaultBlockSyncConfig returns a default configuration for the block sync service
func DefaultBlockSyncConfig() *BlockSyncConfig {
return &BlockSyncConfig{
Version: BlockSyncV0,
// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
return &FastSyncConfig{
Version: BlockchainV0,
}
}
// TestBlockSyncConfig returns a default configuration for the block sync.
func TestBlockSyncConfig() *BlockSyncConfig {
return DefaultBlockSyncConfig()
// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
return DefaultFastSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *BlockSyncConfig) ValidateBasic() error {
func (cfg *FastSyncConfig) ValidateBasic() error {
switch cfg.Version {
case BlockSyncV0:
case BlockchainV0:
return nil
case BlockSyncV2:
return errors.New("blocksync version v2 is no longer supported. Please use v0")
case BlockchainV2:
return errors.New("fastsync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
}
}
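As a quick reference for the version check above, a minimal sketch of validating the sync version through the config API, assuming the `FastSync`-named variant shown on this side of the diff (the other side exposes the equivalent `DefaultBlockSyncConfig`/`BlockSyncConfig` names):

```go
package main

import (
	"fmt"

	tmcfg "github.com/tendermint/tendermint/config"
)

func main() {
	// The default config uses version "v0", which ValidateBasic accepts.
	cfg := tmcfg.DefaultFastSyncConfig()
	fmt.Println(cfg.Version, cfg.ValidateBasic()) // v0 <nil>

	// "v2" is rejected with an explicit "no longer supported" error.
	cfg.Version = "v2"
	fmt.Println(cfg.ValidateBasic())
}
```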

View File

@@ -125,8 +125,8 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
require.NoError(t, cfg.ValidateBasic())
}
func TestBlockSyncConfigValidateBasic(t *testing.T) {
cfg := TestBlockSyncConfig()
func TestFastSyncConfigValidateBasic(t *testing.T) {
cfg := TestFastSyncConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with version

View File

@@ -452,14 +452,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"
version = "{{ .FastSync.Version }}"
#######################################################
### Consensus Configuration Options ###

View File

@@ -36,7 +36,7 @@ proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "anonymous"
# If this node is many blocks behind the tip of the chain, BlockSync
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = true
@@ -354,12 +354,12 @@ discovery-time = "15s"
temp-dir = ""
#######################################################
### BlockSync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"

View File

@@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint.
- [Subscribing to events](./subscription.md)
- [Block Structure](./block-structure.md)
- [RPC](./rpc.md)
- [Block Sync](./block-sync.md)
- [Fast Sync](./fast-sync.md)
- [State Sync](./state-sync.md)
- [Mempool](./mempool.md)
- [Light Client](./light-client.md)

View File

@@ -2,8 +2,7 @@
order: 10
---
# Block Sync
*Formerly known as Fast Sync*
# Fast Sync
In a proof of work blockchain, syncing with the chain is the same
process as staying up-to-date with the consensus: download blocks, and
@@ -15,7 +14,7 @@ scratch can take a very long time. It's much faster to just download
blocks and check the merkle tree of validators than to run the real-time
consensus gossip protocol.
## Using Block Sync
## Using Fast Sync
To support faster syncing, Tendermint offers a `fast-sync` mode, which
is enabled by default, and can be toggled in the `config.toml` or via
@@ -23,36 +22,36 @@ is enabled by default, and can be toggled in the `config.toml` or via
In this mode, the Tendermint daemon will sync hundreds of times faster
than if it used the real-time consensus process. Once caught up, the
daemon will switch out of Block Sync and into the normal consensus mode.
daemon will switch out of fast sync and into the normal consensus mode.
After running for some time, the node is considered `caught up` if it
has at least one peer and its height is at least as high as the max
reported peer height. See [the IsCaughtUp
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta.
Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta.
If you would like to use a different version you can do so by changing the version in the `config.toml`:
```toml
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy Block Sync implementation
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
```
If we're lagging sufficiently, we should go back to block syncing, but
If we're lagging sufficiently, we should go back to fast syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
## The Block Sync event
When the tendermint blockchain core launches, it might switch to the `block-sync`
## The Fast Sync event
When the tendermint blockchain core launches, it might switch to the `fast-sync`
mode to catch its state up to the current network best height. The core will emit
a fast-sync event to expose the current status and the sync height. Once it has caught up to
the network best height, it will switch to the state sync mechanism and then emit
another event exposing the fast-sync `complete` status and the state `height`.
The user can query the events by subscribing `EventQueryBlockSyncStatus`
The user can query the events by subscribing `EventQueryFastSyncStatus`
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
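As a sketch of the event subscription described above, assuming an already-running `*types.EventBus` and the `EventQueryFastSyncStatus`/`EventDataFastSyncStatus` names used on this side of the diff (the block-sync side uses `EventQueryBlockSyncStatus`/`EventDataBlockSyncStatus`):

```go
package syncwatch

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// watchSyncStatus is an illustrative helper, not part of Tendermint itself.
func watchSyncStatus(ctx context.Context, bus *types.EventBus) error {
	sub, err := bus.Subscribe(ctx, "sync-watcher", types.EventQueryFastSyncStatus, 1)
	if err != nil {
		return err
	}
	// Block until the node reports a fast-sync status change.
	msg := <-sub.Out()
	status, ok := msg.Data().(types.EventDataFastSyncStatus)
	if !ok {
		return fmt.Errorf("unexpected event payload %T", msg.Data())
	}
	fmt.Printf("fast sync complete=%v at height %d\n", status.Complete, status.Height)
	return nil
}
```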

View File

@@ -4,7 +4,7 @@ order: 11
# State Sync
With block sync a node is downloading all of the data of an application from genesis and verifying it.
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
With state sync your node will download data related to the head or near the head of the chain and verify the data.
This leads to drastically shorter times for joining a network.

View File

@@ -1,19 +1,19 @@
/*
Package blocksync implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism was
formerly known as fast-sync.
Package blockchain implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism is
more formally known as fast-sync.
In order for a full node to successfully participate in consensus, it must have
the latest view of state. The blocksync protocol is a mechanism in which peers
the latest view of state. The fast-sync protocol is a mechanism in which peers
may exchange and gossip entire blocks with one another, in a request/response
type model, until they've successfully synced to the latest head block. Once
successfully synced, the full node can switch to an active role in consensus and
will no longer blocksync and thus no longer run the blocksync process.
will no longer fast-sync and thus no longer run the fast-sync process.
Note, the blocksync reactor Service gossips entire block and relevant data such
that each receiving peer may construct the entire view of the blocksync state.
Note, the blockchain reactor Service gossips entire block and relevant data such
that each receiving peer may construct the entire view of the blockchain state.
There are currently two versions of the blocksync reactor Service:
There are two versions of the blockchain reactor Service, i.e. fast-sync:
- v0: The initial implementation that is battle-tested, but whose test coverage
is lacking and is not formally verifiable.
@@ -22,7 +22,7 @@ There are currently two versions of the blocksync reactor Service:
is known to have various bugs that could make it unreliable in production
environments.
The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This
The v0 blockchain reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond
to block requests from peers. For every block request from a peer, the reactor
will execute respondToPeer which will fetch the block from the node's state store
@@ -33,4 +33,4 @@ Internally, v0 runs a poolRoutine that constantly checks for what blocks it need
and requests them. The poolRoutine is also responsible for taking blocks from the
pool, saving and executing each block.
*/
package blocksync
package blockchain
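To make the respondToPeer behaviour described in this package comment concrete, here is a minimal illustrative sketch with simplified stand-in types; the real reactor exchanges `bcproto` messages over p2p envelopes, and everything below except the `LoadBlock` signature is invented for illustration:

```go
package sketch

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// blockStore is a trimmed-down view of the node's block store.
type blockStore interface {
	LoadBlock(height int64) *types.Block
}

// blockRequest and blockResponse are simplified stand-ins for the
// protobuf messages exchanged on the BlockchainChannel.
type blockRequest struct{ Height int64 }
type blockResponse struct{ Block *types.Block }

// respondToPeer looks the requested block up locally and answers with it,
// or reports that the block is not available.
func respondToPeer(store blockStore, req blockRequest) (*blockResponse, error) {
	b := store.LoadBlock(req.Height)
	if b == nil {
		return nil, fmt.Errorf("no block at height %d", req.Height)
	}
	return &blockResponse{Block: b}, nil
}
```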

View File

@@ -1,7 +1,7 @@
package blocksync
package blockchain
import (
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)

View File

@@ -65,7 +65,7 @@ type BlockRequest struct {
PeerID types.NodeID
}
// BlockPool keeps track of the block sync peers, block requests and block responses.
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time

View File

@@ -6,13 +6,13 @@ import (
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blocksync"
bc "github.com/tendermint/tendermint/internal/blockchain"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
@@ -60,7 +60,7 @@ const (
)
type consensusReactor interface {
// For when we switch from blockchain reactor and block sync to the consensus
// For when we switch from blockchain reactor and fast sync to the consensus
// machine.
SwitchToConsensus(state sm.State, skipWAL bool)
}
@@ -85,7 +85,7 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
blockSync *tmSync.AtomicBool
fastSync *tmSync.AtomicBool
blockchainCh *p2p.Channel
// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
@@ -121,7 +121,7 @@ func NewReactor(
consReactor consensusReactor,
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
blockSync bool,
fastSync bool,
metrics *cons.Metrics,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
@@ -142,7 +142,7 @@ func NewReactor(
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: tmSync.NewBool(blockSync),
fastSync: tmSync.NewBool(fastSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
@@ -162,10 +162,10 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If blockSync is enabled, we also start the pool and the pool processing
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.blockSync.IsSet() {
if r.fastSync.IsSet() {
if err := r.pool.Start(); err != nil {
return err
}
@@ -183,7 +183,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.blockSync.IsSet() {
if r.fastSync.IsSet() {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
@@ -371,10 +371,10 @@ func (r *Reactor) processPeerUpdates() {
}
}
// SwitchToBlockSync is called by the state sync reactor when switching to fast
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
r.blockSync.Set()
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync.Set()
r.initialState = state
r.pool.height = state.LastBlockHeight + 1
@@ -496,7 +496,7 @@ FOR_LOOP:
r.Logger.Error("failed to stop pool", "err", err)
}
r.blockSync.UnSet()
r.fastSync.UnSet()
if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
@@ -591,7 +591,7 @@ FOR_LOOP:
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"block sync rate",
"fast sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
@@ -614,14 +614,14 @@ func (r *Reactor) GetMaxPeerBlockHeight() int64 {
}
func (r *Reactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *Reactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
if !r.fastSync.IsSet() {
return time.Duration(0)
}

View File

@@ -15,7 +15,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
@@ -36,7 +36,7 @@ type reactorTestSuite struct {
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
blockSync bool
fastSync bool
}
func setup(
@@ -61,7 +61,7 @@ func setup(
blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
blockSync: true,
fastSync: true,
}
chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)}
@@ -163,7 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
nil,
rts.blockchainChannels[nodeID],
rts.peerUpdates[nodeID],
rts.blockSync,
rts.fastSync,
cons.NopMetrics())
require.NoError(t, err)

View File

@@ -4,7 +4,7 @@ import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
)

View File

@@ -5,7 +5,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -42,7 +42,7 @@ const (
)
type consensusReactor interface {
// for when we switch from blockchain reactor and block sync to
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state state.State, skipWAL bool)
}

View File

@@ -7,14 +7,14 @@ import (
proto "github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/internal/blocksync"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
bc "github.com/tendermint/tendermint/internal/blockchain"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -31,12 +31,12 @@ type blockStore interface {
Height() int64
}
// BlockchainReactor handles block sync protocol.
// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
p2p.BaseReactor
blockSync *sync.AtomicBool // enable block sync on start when it's been Set
stateSynced bool // set to true when SwitchToBlockSync is called by state sync
fastSync *sync.AtomicBool // enable fast sync on start when it's been Set
stateSynced bool // set to true when SwitchToFastSync is called by state sync
scheduler *Routine
processor *Routine
logger log.Logger
@@ -44,7 +44,7 @@ type BlockchainReactor struct {
mtx tmsync.RWMutex
maxPeerHeight int64
syncHeight int64
events chan Event // non-nil during a block sync
events chan Event // non-nil during a fast sync
reporter behavior.Reporter
io iIO
@@ -61,7 +61,7 @@ type blockApplier interface {
// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
@@ -78,7 +78,7 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
blockSync: sync.NewBool(blockSync),
fastSync: sync.NewBool(fastSync),
syncStartHeight: initHeight,
syncStartTime: time.Time{},
lastSyncRate: 0,
@@ -90,10 +90,10 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
blockSync bool,
fastSync bool,
metrics *cons.Metrics) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
return newReactor(state, store, reporter, blockApplier, fastSync, metrics)
}
// SetSwitch implements Reactor interface.
@@ -137,22 +137,22 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.blockSync.IsSet() {
if r.fastSync.IsSet() {
err := r.startSync(nil)
if err != nil {
return fmt.Errorf("failed to start block sync: %w", err)
return fmt.Errorf("failed to start fast sync: %w", err)
}
}
return nil
}
// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor is updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.events != nil {
return errors.New("block sync already in progress")
return errors.New("fast sync already in progress")
}
r.events = make(chan Event, chBufferSize)
go r.scheduler.start()
@@ -167,7 +167,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error {
return nil
}
// endSync ends a block sync
// endSync ends a fast sync
func (r *BlockchainReactor) endSync() {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -179,8 +179,8 @@ func (r *BlockchainReactor) endSync() {
r.processor.stop()
}
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
r.stateSynced = true
state = state.Copy()
@@ -434,7 +434,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
} else {
r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
}
r.logger.Info("block sync Rate", "height", r.syncHeight,
r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
lastHundred = time.Now()
}
@@ -442,12 +442,12 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
case pcBlockVerificationFailure:
r.scheduler.send(event)
case pcFinished:
r.logger.Info("block sync complete, switching to consensus")
r.logger.Info("Fast sync complete, switching to consensus")
if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
r.logger.Error("Failed to switch to consensus reactor")
}
r.endSync()
r.blockSync.UnSet()
r.fastSync.UnSet()
return
case noOpEvent:
default:
@@ -617,14 +617,14 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
}
func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
if !r.fastSync.IsSet() {
return time.Duration(0)
}

View File

@@ -15,7 +15,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
@@ -23,7 +23,7 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"

View File

@@ -163,7 +163,7 @@ type scheduler struct {
height int64
// lastAdvance tracks the last time a block execution happened.
// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
// This covers the cases where there are no peers or all peers have a lower height.
lastAdvance time.Time
syncTimeout time.Duration

View File

@@ -54,8 +54,8 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is fast syncing. 1 if yes, 0 if no.
FastSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge
@@ -169,11 +169,11 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
Name: "fast_syncing",
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
@@ -214,7 +214,7 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
FastSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
}

View File

@@ -12,8 +12,8 @@ type ConsSyncReactor struct {
mock.Mock
}
// SetBlockSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetBlockSyncingMetrics(_a0 float64) {
// SetFastSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetFastSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}

View File

@@ -9,13 +9,13 @@ import (
time "time"
)
// BlockSyncReactor is an autogenerated mock type for the BlockSyncReactor type
type BlockSyncReactor struct {
// FastSyncReactor is an autogenerated mock type for the FastSyncReactor type
type FastSyncReactor struct {
mock.Mock
}
// GetMaxPeerBlockHeight provides a mock function with given fields:
func (_m *BlockSyncReactor) GetMaxPeerBlockHeight() int64 {
func (_m *FastSyncReactor) GetMaxPeerBlockHeight() int64 {
ret := _m.Called()
var r0 int64
@@ -29,7 +29,7 @@ func (_m *BlockSyncReactor) GetMaxPeerBlockHeight() int64 {
}
// GetRemainingSyncTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetRemainingSyncTime() time.Duration {
func (_m *FastSyncReactor) GetRemainingSyncTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
@@ -43,7 +43,7 @@ func (_m *BlockSyncReactor) GetRemainingSyncTime() time.Duration {
}
// GetTotalSyncedTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetTotalSyncedTime() time.Duration {
func (_m *FastSyncReactor) GetTotalSyncedTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
@@ -56,8 +56,8 @@ func (_m *BlockSyncReactor) GetTotalSyncedTime() time.Duration {
return r0
}
// SwitchToBlockSync provides a mock function with given fields: _a0
func (_m *BlockSyncReactor) SwitchToBlockSync(_a0 state.State) error {
// SwitchToFastSync provides a mock function with given fields: _a0
func (_m *FastSyncReactor) SwitchToFastSync(_a0 state.State) error {
ret := _m.Called(_a0)
var r0 error

View File

@@ -96,18 +96,18 @@ const (
type ReactorOption func(*Reactor)
// NOTE: Temporary interface for switching to block sync, we should get rid of v0.
// Temporary interface for switching to fast sync, we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type BlockSyncReactor interface {
SwitchToBlockSync(sm.State) error
type FastSyncReactor interface {
SwitchToFastSync(sm.State) error
GetMaxPeerBlockHeight() int64
// GetTotalSyncedTime returns the time duration since the blocksync starting.
// GetTotalSyncedTime returns the time duration since the fastsync starting.
GetTotalSyncedTime() time.Duration
// GetRemainingSyncTime returns the estimated time until the node is fully synced;
// it will return 0 if the blocksync is not running or the number of blocks synced is
// it will return 0 if the fastsync is not running or the number of blocks synced is
// too small (less than 100).
GetRemainingSyncTime() time.Duration
}
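A small sketch of how a caller might surface these readings, using a locally defined, trimmed copy of the interface so the example stays self-contained (the `logSyncProgress` helper is hypothetical):

```go
package sketch

import (
	"fmt"
	"time"
)

// syncStatus mirrors the subset of the reactor interface used here.
type syncStatus interface {
	GetMaxPeerBlockHeight() int64
	GetTotalSyncedTime() time.Duration
	GetRemainingSyncTime() time.Duration
}

// logSyncProgress prints a one-line sync progress summary.
func logSyncProgress(r syncStatus) {
	fmt.Printf("max peer height %d, syncing for %s, est. remaining %s\n",
		r.GetMaxPeerBlockHeight(), r.GetTotalSyncedTime(), r.GetRemainingSyncTime())
}
```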
@@ -117,7 +117,7 @@ type BlockSyncReactor interface {
type ConsSyncReactor interface {
SwitchToConsensus(sm.State, bool)
SetStateSyncingMetrics(float64)
SetBlockSyncingMetrics(float64)
SetFastSyncingMetrics(float64)
}
// Reactor defines a reactor for the consensus service.
@@ -265,7 +265,7 @@ func (r *Reactor) SetEventBus(b *types.EventBus) {
r.state.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
func (r *Reactor) WaitSync() bool {
r.mtx.RLock()
defer r.mtx.RUnlock()
@@ -278,8 +278,8 @@ func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(r *Reactor) { r.Metrics = metrics }
}
// SwitchToConsensus switches from block-sync mode to consensus mode. It resets
// the state, turns off block-sync, and starts the consensus state-machine.
// SwitchToConsensus switches from fast-sync mode to consensus mode. It resets
// the state, turns off fast-sync, and starts the consensus state-machine.
func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.Logger.Info("switching to consensus")
@@ -296,7 +296,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.waitSync = false
r.mtx.Unlock()
r.Metrics.BlockSyncing.Set(0)
r.Metrics.FastSyncing.Set(0)
r.Metrics.StateSyncing.Set(0)
if skipWAL {
@@ -313,9 +313,9 @@ conR:
%+v`, err, r.state, r))
}
d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the blocksync complete event", "err", err)
d := types.EventDataFastSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventFastSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the fastsync complete event", "err", err)
}
}
@@ -969,7 +969,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
go r.gossipVotesRoutine(ps)
go r.queryMaj23Routine(ps)
// Send our state to the peer. If we're block-syncing, broadcast a
// Send our state to the peer. If we're fast-syncing, broadcast a
// RoundStepMessage later upon SwitchToConsensus().
if !r.waitSync {
go r.sendNewRoundStepMessage(ps.peerID)
@@ -1219,7 +1219,7 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're block syncing. Messages affect
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
@@ -1442,6 +1442,6 @@ func (r *Reactor) SetStateSyncingMetrics(v float64) {
r.Metrics.StateSyncing.Set(v)
}
func (r *Reactor) SetBlockSyncingMetrics(v float64) {
r.Metrics.BlockSyncing.Set(v)
func (r *Reactor) SetFastSyncingMetrics(v float64) {
r.Metrics.FastSyncing.Set(v)
}

View File

@@ -43,7 +43,7 @@ type reactorTestSuite struct {
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
blocksyncSubs map[types.NodeID]types.Subscription
fastsyncSubs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
@@ -60,11 +60,11 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
t.Helper()
rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
fastsyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
}
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -94,13 +94,13 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
require.NoError(t, err)
fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size)
fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryFastSyncStatus, size)
require.NoError(t, err)
rts.states[nodeID] = state
rts.subs[nodeID] = blocksSub
rts.reactors[nodeID] = reactor
rts.blocksyncSubs[nodeID] = fsSub
rts.fastsyncSubs[nodeID] = fsSub
// simulate handle initChain in handshake
if state.state.LastBlockHeight == 0 {
@@ -263,9 +263,9 @@ func waitForBlockWithUpdatedValsAndValidateIt(
wg.Wait()
}
func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
func ensureFastSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
t.Helper()
status, ok := msg.Data().(types.EventDataBlockSyncStatus)
status, ok := msg.Data().(types.EventDataFastSyncStatus)
require.True(t, ok)
require.Equal(t, complete, status.Complete)
@@ -301,14 +301,14 @@ func TestReactorBasic(t *testing.T) {
wg.Wait()
for _, sub := range rts.blocksyncSubs {
for _, sub := range rts.fastsyncSubs {
wg.Add(1)
// wait till everyone makes the consensus switch
go func(s types.Subscription) {
defer wg.Done()
msg := <-s.Out()
ensureBlockSyncStatus(t, msg, true, 0)
ensureFastSyncStatus(t, msg, true, 0)
}(sub)
}

View File

@@ -1,41 +0,0 @@
package progressbar
import "fmt"
// the progressbar indicates the current status and progress of a long-running task.
// ref: https://www.pixelstech.net/article/1596946473-A-simple-example-on-implementing-progress-bar-in-GoLang
type Bar struct {
percent int64 // progress percentage
cur int64 // current progress
start int64 // the init starting value for progress
total int64 // total value for progress
rate string // the actual progress bar to be printed
graph string // the fill value for progress bar
}
func (bar *Bar) NewOption(start, total int64) {
bar.cur = start
bar.start = start
bar.total = total
bar.graph = "█"
bar.percent = bar.getPercent()
}
func (bar *Bar) getPercent() int64 {
return int64(float32(bar.cur-bar.start) / float32(bar.total-bar.start) * 100)
}
func (bar *Bar) Play(cur int64) {
bar.cur = cur
last := bar.percent
bar.percent = bar.getPercent()
if bar.percent != last && bar.percent%2 == 0 {
bar.rate += bar.graph
}
fmt.Printf("\r[%-50s]%3d%% %8d/%d", bar.rate, bar.percent, bar.cur, bar.total)
}
func (bar *Bar) Finish() {
fmt.Println()
}
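For reference, a minimal usage sketch of the `Bar` type shown above, following the same pattern the reindex-event command uses (`NewOption`, repeated `Play`, then `Finish`); note the package lives under internal/, so this only builds inside the Tendermint module:

```go
package main

import "github.com/tendermint/tendermint/internal/libs/progressbar"

func main() {
	var bar progressbar.Bar
	bar.NewOption(0, 100) // starting value and total
	defer bar.Finish()    // print the final newline

	for i := int64(0); i <= 100; i++ {
		bar.Play(i) // advance the bar to the current value
	}
}
```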

View File

@@ -1,41 +0,0 @@
package progressbar
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestProgressBar(t *testing.T) {
zero := int64(0)
hundred := int64(100)
var bar Bar
bar.NewOption(zero, hundred)
require.Equal(t, zero, bar.start)
require.Equal(t, zero, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, zero, bar.percent)
require.Equal(t, "█", bar.graph)
require.Equal(t, "", bar.rate)
defer bar.Finish()
for i := zero; i <= hundred; i++ {
time.Sleep(1 * time.Millisecond)
bar.Play(i)
}
require.Equal(t, zero, bar.start)
require.Equal(t, hundred, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, hundred, bar.percent)
var rate string
for i := zero; i < hundred/2; i++ {
rate += "█"
}
require.Equal(t, rate, bar.rate)
}

View File

@@ -188,8 +188,8 @@ func (txmp *TxMempool) WaitForNextTx() <-chan struct{} {
// NextGossipTx returns the next valid transaction to gossip. A caller must wait
// for WaitForNextTx to signal a transaction is available to gossip first. It is
// thread-safe.
func (txmp *TxMempool) NextGossipTx() *clist.CElement {
return txmp.gossipIndex.Front()
func (txmp *TxMempool) NextGossipTx() *WrappedTx {
return txmp.gossipIndex.Front().Value.(*WrappedTx)
}
// EnableTxsAvailable enables the mempool to trigger events when transactions

View File

@@ -9,7 +9,6 @@ import (
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/p2p"
@@ -307,7 +306,7 @@ func (r *Reactor) processPeerUpdates() {
func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
peerMempoolID := r.ids.GetForPeer(peerID)
var nextGossipTx *clist.CElement
var memTx *WrappedTx
// remove the peer ID from the map of routines and mark the waitgroup as done
defer func() {
@@ -334,10 +333,10 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
// This happens because the CElement we were looking at got garbage
// collected (removed). That is, .NextWait() returned nil. Go ahead and
// start from the beginning.
if nextGossipTx == nil {
if memTx == nil {
select {
case <-r.mempool.WaitForNextTx(): // wait until a tx is available
if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
if memTx = r.mempool.NextGossipTx(); memTx == nil {
continue
}
@@ -353,8 +352,6 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
}
memTx := nextGossipTx.Value.(*WrappedTx)
if r.peerMgr != nil {
height := r.peerMgr.GetHeight(peerID)
if height > 0 && height < memTx.height-1 {
@@ -383,8 +380,16 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
select {
case <-nextGossipTx.NextWaitChan():
nextGossipTx = nextGossipTx.Next()
case <-memTx.gossipEl.NextWaitChan():
// If there is a next element in the gossip index, we point memTx at that
// node's value; otherwise we reset memTx to nil, which is checked by the
// outer for loop.
next := memTx.gossipEl.Next()
if next != nil {
memTx = next.Value.(*WrappedTx)
} else {
memTx = nil
}
case <-closer.Done():
// The peer is marked for removal via a PeerUpdate as the doneCh was
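
The replacement logic above advances the broadcast routine by following the transaction's own gossip-index element rather than a free-standing clist cursor. A hypothetical helper distilling that advance step (same package; gossipEl is the clist element stored on the WrappedTx):

```go
// nextGossipNeighbor returns the WrappedTx that follows memTx in the gossip
// index, or nil when memTx is the last entry, in which case the caller goes
// back to waiting on WaitForNextTx.
func nextGossipNeighbor(memTx *WrappedTx) *WrappedTx {
	if next := memTx.gossipEl.Next(); next != nil {
		return next.Value.(*WrappedTx)
	}
	return nil
}
```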

View File

@@ -1,39 +0,0 @@
package p2p
import (
"testing"
"time"
"github.com/tendermint/tendermint/libs/log"
)
func TestCloseWhileDequeueFull(t *testing.T) {
enqueueLength := 5
chDescs := []ChannelDescriptor{
{ID: 0x01, Priority: 1, MaxSendBytes: 4},
}
pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120)
for i := 0; i < enqueueLength; i++ {
pqueue.enqueue() <- Envelope{
channelID: 0x01,
Message: &testMessage{Value: "foo"}, // 5 bytes
}
}
go pqueue.process()
// sleep to allow context switch for process() to run
time.Sleep(10 * time.Millisecond)
doneCh := make(chan struct{})
go func() {
pqueue.close()
close(doneCh)
}()
select {
case <-doneCh:
case <-time.After(2 * time.Second):
t.Fatal("pqueue failed to close")
}
}

View File

@@ -33,8 +33,11 @@ func GetFreePort() (int, error) {
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
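
GetFreePort relies on the standard trick of listening on port 0 so the kernel picks an unused port, reading the assigned port back, and releasing the listener. The hunk only shows the tail of the function; a self-contained sketch of the whole pattern (names and package here are illustrative, not the repository's actual layout):

```go
package main

import (
	"fmt"
	"log"
	"net"
)

// freePort asks the OS for an ephemeral TCP port: listening on port 0 makes
// the kernel choose one, which we read back before closing the listener.
func freePort() (int, error) {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	l, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	p, err := freePort()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("free port:", p) // value varies per run
}
```

There is an inherent race: another process can grab the port between Close and the caller's own Listen, which is generally acceptable for test helpers like this one.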

View File

@@ -10,8 +10,8 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
mockp "github.com/tendermint/tendermint/light/provider/mock"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
// NOTE: block is produced every minute. Make sure the verification time
@@ -21,50 +21,12 @@ import (
// or -benchtime 100x.
//
// Remember that none of these benchmarks account for network latency.
var ()
type providerBenchmarkImpl struct {
currentHeight int64
blocks map[int64]*types.LightBlock
}
func newProviderBenchmarkImpl(headers map[int64]*types.SignedHeader,
vals map[int64]*types.ValidatorSet) provider.Provider {
impl := providerBenchmarkImpl{
blocks: make(map[int64]*types.LightBlock, len(headers)),
}
for height, header := range headers {
if height > impl.currentHeight {
impl.currentHeight = height
}
impl.blocks[height] = &types.LightBlock{
SignedHeader: header,
ValidatorSet: vals[height],
}
}
return &impl
}
func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
if height == 0 {
return impl.blocks[impl.currentHeight], nil
}
lb, ok := impl.blocks[height]
if !ok {
return nil, provider.ErrLightBlockNotFound
}
return lb, nil
}
func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error {
panic("not implemented")
}
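
providerBenchmarkImpl is a minimal in-memory provider.Provider: a map of light blocks keyed by height, with the convention that a request for height 0 means "latest". A hypothetical fragment against the struct as defined above (blocks left empty since only the lookup behaviour matters):

```go
func exampleHeightZeroConvention() {
	impl := &providerBenchmarkImpl{
		currentHeight: 3,
		blocks: map[int64]*types.LightBlock{
			1: {}, 2: {}, 3: {},
		},
	}
	latest, _ := impl.LightBlock(context.Background(), 0) // returns blocks[3]
	_, err := impl.LightBlock(context.Background(), 7)    // provider.ErrLightBlockNotFound
	_, _ = latest, err
}
```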
var (
benchmarkFullNode = mockp.New(genMockNode(chainID, 1000, 100, 1, bTime))
genesisBlock, _ = benchmarkFullNode.LightBlock(context.Background(), 1)
)
func BenchmarkSequence(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1)
c, err := light.NewClient(
context.Background(),
chainID,
@@ -93,10 +55,6 @@ func BenchmarkSequence(b *testing.B) {
}
func BenchmarkBisection(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1)
c, err := light.NewClient(
context.Background(),
chainID,
@@ -124,10 +82,7 @@ func BenchmarkBisection(b *testing.B) {
}
func BenchmarkBackwards(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
trustedBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 0)
c, err := light.NewClient(
context.Background(),
chainID,

View File

@@ -3,13 +3,11 @@ package light_test
import (
"context"
"errors"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@@ -18,7 +16,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
mockp "github.com/tendermint/tendermint/light/provider/mock"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
@@ -59,9 +57,14 @@ var (
// last header (3/3 signed)
3: h3,
}
l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals}
l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals}
l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals}
l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals}
fullNode = mockp.New(
chainID,
headerSet,
valSet,
)
deadNode = mockp.NewDeadMock(chainID)
largeFullNode = mockp.New(genMockNode(chainID, 10, 3, 0, bTime))
)
func TestValidateTrustOptions(t *testing.T) {
@@ -110,6 +113,11 @@ func TestValidateTrustOptions(t *testing.T) {
}
func TestMock(t *testing.T) {
l, _ := fullNode.LightBlock(ctx, 3)
assert.Equal(t, int64(3), l.Height)
}
func TestClient_SequentialVerification(t *testing.T) {
newKeys := genPrivKeys(4)
newVals := newKeys.ToValidators(10, 1)
@@ -208,22 +216,28 @@ func TestClient_SequentialVerification(t *testing.T) {
}
for _, tc := range testCases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
tc := tc
t.Run(tc.name, func(t *testing.T) {
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode},
mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
),
[]provider.Provider{mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
)},
dbs.New(dbm.NewMemDB()),
light.SequentialVerification(),
light.Logger(log.TestingLogger()),
)
if testCase.initErr {
if tc.initErr {
require.Error(t, err)
return
}
@@ -231,12 +245,11 @@ func TestClient_SequentialVerification(t *testing.T) {
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour))
if testCase.verifyErr {
if tc.verifyErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
mockNode.AssertExpectations(t)
})
}
}
@@ -330,14 +343,20 @@ func TestClient_SkippingVerification(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode},
mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
),
[]provider.Provider{mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
)},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
light.Logger(log.TestingLogger()),
@@ -363,23 +382,8 @@ func TestClient_SkippingVerification(t *testing.T) {
// start from a large light block to make sure that the pivot height doesn't select a height outside
// the appropriate range
func TestClientLargeBisectionVerification(t *testing.T) {
numBlocks := int64(300)
mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, numBlocks, 101, 2, bTime)
lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]}
mockNode := &provider_mocks.Provider{}
mockNode.On("LightBlock", mock.Anything, numBlocks).
Return(lastBlock, nil)
mockNode.On("LightBlock", mock.Anything, int64(200)).
Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil)
mockNode.On("LightBlock", mock.Anything, int64(256)).
Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil)
mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200))
veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime))
trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5)
require.NoError(t, err)
c, err := light.NewClient(
ctx,
@@ -389,25 +393,20 @@ func TestClientLargeBisectionVerification(t *testing.T) {
Height: trustedLightBlock.Height,
Hash: trustedLightBlock.Hash(),
},
mockNode,
[]provider.Provider{mockNode},
veryLargeFullNode,
[]provider.Provider{veryLargeFullNode},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
h, err := c.Update(ctx, bTime.Add(300*time.Minute))
h, err := c.Update(ctx, bTime.Add(100*time.Minute))
assert.NoError(t, err)
height, err := c.LastTrustedHeight()
require.NoError(t, err)
require.Equal(t, numBlocks, height)
h2, err := mockNode.LightBlock(ctx, numBlocks)
h2, err := veryLargeFullNode.LightBlock(ctx, 100)
require.NoError(t, err)
assert.Equal(t, h, h2)
mockNode.AssertExpectations(t)
}
func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet)
c, err := light.NewClient(
ctx,
chainID,
@@ -416,8 +415,8 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
Height: 1,
Hash: h1.Hash(),
},
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
@@ -433,18 +432,15 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
// verify using bisection the light block between the two trusted light blocks
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
assert.NoError(t, err)
mockFullNode.AssertExpectations(t)
}
func TestClient_Cleanup(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -459,14 +455,12 @@ func TestClient_Cleanup(t *testing.T) {
l, err := c.TrustedLightBlock(1)
assert.Error(t, err)
assert.Nil(t, l)
mockFullNode.AssertExpectations(t)
}
// trustedHeader.Height == options.Height
func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
t.Run("hashes should match", func(t *testing.T) {
mockNode := &provider_mocks.Provider{}
{
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@@ -475,8 +469,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
ctx,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode},
fullNode,
[]provider.Provider{fullNode},
trustedStore,
light.Logger(log.TestingLogger()),
)
@@ -487,11 +481,10 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
assert.NotNil(t, l)
assert.Equal(t, l.Hash(), h1.Hash())
assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes())
mockNode.AssertExpectations(t)
})
}
// 2. options.Hash != trustedHeader.Hash
t.Run("hashes should not match", func(t *testing.T) {
{
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@@ -499,7 +492,15 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
// header1 != h1
header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys))
mockNode := &provider_mocks.Provider{}
primary := mockp.New(
chainID,
map[int64]*types.SignedHeader{
// trusted header
1: header1,
},
valSet,
)
c, err := light.NewClient(
ctx,
@@ -509,8 +510,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
Height: 1,
Hash: header1.Hash(),
},
mockNode,
[]provider.Provider{mockNode},
primary,
[]provider.Provider{primary},
trustedStore,
light.Logger(log.TestingLogger()),
)
@@ -523,21 +524,16 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
assert.Equal(t, l.Hash(), l1.Hash())
assert.NoError(t, l.ValidateBasic(chainID))
}
mockNode.AssertExpectations(t)
})
}
}
func TestClient_Update(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -550,19 +546,15 @@ func TestClient_Update(t *testing.T) {
assert.EqualValues(t, 3, l.Height)
assert.NoError(t, l.ValidateBasic(chainID))
}
mockFullNode.AssertExpectations(t)
}
func TestClient_Concurrency(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -595,20 +587,15 @@ func TestClient_Concurrency(t *testing.T) {
}
wg.Wait()
mockFullNode.AssertExpectations(t)
}
func TestClient_AddProviders(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{
1: h1,
2: h2,
}, valSet)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -623,28 +610,22 @@ func TestClient_AddProviders(t *testing.T) {
}()
// NOTE: the light client doesn't check uniqueness of providers
c.AddProvider(mockFullNode)
c.AddProvider(fullNode)
require.Len(t, c.Witnesses(), 2)
select {
case <-closeCh:
case <-time.After(5 * time.Second):
t.Fatal("concurent light block verification failed to finish in 5s")
}
mockFullNode.AssertExpectations(t)
}
func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil)
mockDeadNode := &provider_mocks.Provider{}
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockDeadNode,
[]provider.Provider{mockFullNode, mockFullNode},
deadNode,
[]provider.Provider{fullNode, fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -654,25 +635,16 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
require.NoError(t, err)
// the primary should no longer be the deadNode
assert.NotEqual(t, c.Primary(), mockDeadNode)
assert.NotEqual(t, c.Primary(), deadNode)
// we should still have the dead node as a witness because it
// hasn't repeatedly been unresponsive yet
assert.Equal(t, 2, len(c.Witnesses()))
mockDeadNode.AssertExpectations(t)
mockFullNode.AssertExpectations(t)
}
func TestClient_BackwardsVerification(t *testing.T) {
{
headers, vals, _ := genLightBlocksWithKeys(chainID, 9, 3, 0, bTime)
delete(headers, 1)
delete(headers, 2)
delete(vals, 1)
delete(vals, 2)
mockLargeFullNode := mockNodeFromHeadersAndVals(headers, vals)
trustHeader, _ := mockLargeFullNode.LightBlock(ctx, 6)
trustHeader, _ := largeFullNode.LightBlock(ctx, 6)
c, err := light.NewClient(
ctx,
chainID,
@@ -681,8 +653,8 @@ func TestClient_BackwardsVerification(t *testing.T) {
Height: trustHeader.Height,
Hash: trustHeader.Hash(),
},
mockLargeFullNode,
[]provider.Provider{mockLargeFullNode},
largeFullNode,
[]provider.Provider{largeFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -720,36 +692,41 @@ func TestClient_BackwardsVerification(t *testing.T) {
// so expect error
_, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute))
assert.Error(t, err)
mockLargeFullNode.AssertExpectations(t)
}
{
testCases := []struct {
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
provider provider.Provider
}{
{
// 7) provides incorrect height
headers: map[int64]*types.SignedHeader{
2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
3: h3,
},
vals: valSet,
mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
3: h3,
},
valSet,
),
},
{
// 8) provides incorrect hash
headers: map[int64]*types.SignedHeader{
2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)),
3: h3,
},
vals: valSet,
mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)),
3: h3,
},
valSet,
),
},
}
for idx, tc := range testCases {
mockNode := mockNodeFromHeadersAndVals(tc.headers, tc.vals)
c, err := light.NewClient(
ctx,
chainID,
@@ -758,8 +735,8 @@ func TestClient_BackwardsVerification(t *testing.T) {
Height: 3,
Hash: h3.Hash(),
},
mockNode,
[]provider.Provider{mockNode},
tc.provider,
[]provider.Provider{tc.provider},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -767,7 +744,6 @@ func TestClient_BackwardsVerification(t *testing.T) {
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second))
assert.Error(t, err, idx)
mockNode.AssertExpectations(t)
}
}
}
@@ -777,62 +753,60 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) {
db := dbs.New(dbm.NewMemDB())
err := db.SaveLightBlock(l1)
require.NoError(t, err)
mockNode := &provider_mocks.Provider{}
c, err := light.NewClientFromTrustedStore(
chainID,
trustPeriod,
mockNode,
[]provider.Provider{mockNode},
deadNode,
[]provider.Provider{deadNode},
db,
)
require.NoError(t, err)
// 2) Check light block exists
// 2) Check light block exists (deadNode is being used to ensure we're not getting
// it from primary)
h, err := c.TrustedLightBlock(1)
assert.NoError(t, err)
assert.EqualValues(t, l1.Height, h.Height)
mockNode.AssertExpectations(t)
}
func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
// different header hashes than primary plus less than 1/3 signed (no fork)
headers1 := map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
len(keys), len(keys), types.BlockID{Hash: h1.Hash()}),
}
vals1 := map[int64]*types.ValidatorSet{
1: vals,
2: vals,
}
mockBadNode1 := mockNodeFromHeadersAndVals(headers1, vals1)
mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
badProvider1 := mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
len(keys), len(keys), types.BlockID{Hash: h1.Hash()}),
},
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
},
)
// header is empty
headers2 := map[int64]*types.SignedHeader{
1: h1,
2: h2,
}
vals2 := map[int64]*types.ValidatorSet{
1: vals,
2: vals,
}
mockBadNode2 := mockNodeFromHeadersAndVals(headers2, vals2)
mockBadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
badProvider2 := mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: h2,
},
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
},
)
mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet)
lb1, _ := mockBadNode1.LightBlock(ctx, 2)
lb1, _ := badProvider1.LightBlock(ctx, 2)
require.NotEqual(t, lb1.Hash(), l1.Hash())
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockBadNode1, mockBadNode2},
fullNode,
[]provider.Provider{badProvider1, badProvider2},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -854,13 +828,12 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
}
// witness does not have a light block -> left in the list
assert.EqualValues(t, 1, len(c.Witnesses()))
mockBadNode1.AssertExpectations(t)
mockBadNode2.AssertExpectations(t)
}
func TestClient_TrustedValidatorSet(t *testing.T) {
differentVals, _ := factory.RandValidatorSet(10, 100)
mockBadValSetNode := mockNodeFromHeadersAndVals(
badValSetNode := mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
// 3/3 signed, but validator set at height 2 below is invalid -> witness
@@ -868,27 +841,21 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
0, len(keys), types.BlockID{Hash: h1.Hash()}),
3: h3,
},
map[int64]*types.ValidatorSet{
1: vals,
2: differentVals,
})
mockFullNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,
2: h2,
3: differentVals,
},
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
})
)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockBadValSetNode, mockFullNode},
fullNode,
[]provider.Provider{badValSetNode, fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -898,29 +865,15 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second))
assert.NoError(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockBadValSetNode.AssertExpectations(t)
mockFullNode.AssertExpectations(t)
}
func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,
3: h3,
0: h3,
},
map[int64]*types.ValidatorSet{
1: vals,
3: vals,
0: vals,
})
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockFullNode,
[]provider.Provider{mockFullNode},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.PruningSize(1),
@@ -935,7 +888,6 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
_, err = c.TrustedLightBlock(1)
assert.Error(t, err)
mockFullNode.AssertExpectations(t)
}
func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
@@ -947,108 +899,86 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
testCases := []struct {
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
errorToThrow error
errorHeight int64
err bool
err bool
}{
{
headers: map[int64]*types.SignedHeader{
1: h1,
3: h3,
},
vals: map[int64]*types.ValidatorSet{
1: vals,
3: vals,
},
err: false,
headerSet,
valSet,
false,
},
{
headers: map[int64]*types.SignedHeader{
1: h1,
},
vals: map[int64]*types.ValidatorSet{
headerSet,
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
3: nil,
},
errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")},
errorHeight: 3,
err: true,
true,
},
{
headers: map[int64]*types.SignedHeader{
map[int64]*types.SignedHeader{
1: h1,
2: h2,
3: nil,
},
errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")},
errorHeight: 3,
vals: valSet,
err: true,
valSet,
true,
},
{
headers: map[int64]*types.SignedHeader{
1: h1,
3: h3,
},
vals: map[int64]*types.ValidatorSet{
headerSet,
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
3: emptyValSet,
},
err: true,
true,
},
}
for i, tc := range testCases {
testCase := tc
t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) {
mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals)
if testCase.errorToThrow != nil {
mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow)
}
for _, tc := range testCases {
badNode := mockp.New(
chainID,
tc.headers,
tc.vals,
)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
badNode,
[]provider.Provider{badNode, badNode},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockBadNode,
[]provider.Provider{mockBadNode, mockBadNode},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour))
if testCase.err {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
mockBadNode.AssertExpectations(t)
})
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour))
if tc.err {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
}
}
func TestClientHandlesContexts(t *testing.T) {
mockNode := &provider_mocks.Provider{}
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == nil }),
int64(1)).Return(l1, nil)
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.DeadlineExceeded }),
mock.Anything).Return(nil, context.DeadlineExceeded)
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.Canceled }),
mock.Anything).Return(nil, context.Canceled)
p := mockp.New(genMockNode(chainID, 100, 10, 1, bTime))
genBlock, err := p.LightBlock(ctx, 1)
require.NoError(t, err)
// instantiate the light client with a timeout
ctxTimeOut, cancel := context.WithTimeout(ctx, 1*time.Nanosecond)
ctxTimeOut, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
_, err := light.NewClient(
_, err = light.NewClient(
ctxTimeOut,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
light.TrustOptions{
Period: 24 * time.Hour,
Height: 1,
Hash: genBlock.Hash(),
},
p,
[]provider.Provider{p, p},
dbs.New(dbm.NewMemDB()),
)
require.Error(t, ctxTimeOut.Err())
@@ -1059,15 +989,19 @@ func TestClientHandlesContexts(t *testing.T) {
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
light.TrustOptions{
Period: 24 * time.Hour,
Height: 1,
Hash: genBlock.Hash(),
},
p,
[]provider.Provider{p, p},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
// verify a block with a timeout
ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 1*time.Nanosecond)
ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
_, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute))
require.Error(t, ctxTimeOutBlock.Err())
@@ -1076,11 +1010,11 @@ func TestClientHandlesContexts(t *testing.T) {
// verify a block with a cancel
ctxCancel, cancel := context.WithCancel(ctx)
cancel()
defer cancel()
time.AfterFunc(10*time.Millisecond, cancel)
_, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute))
require.Error(t, ctxCancel.Err())
require.Error(t, err)
require.True(t, errors.Is(err, context.Canceled))
mockNode.AssertExpectations(t)
}

View File

@@ -111,7 +111,6 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
witness provider.Provider, witnessIndex int) {
lightBlock, err := witness.LightBlock(ctx, h.Height)
// fmt.Println(lightBlock.SignedHeader)
switch err {
// no error means we move on to checking the hash of the two headers
case nil:

View File

@@ -1,12 +1,10 @@
package light_test
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@@ -14,7 +12,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
mockp "github.com/tendermint/tendermint/light/provider/mock"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
@@ -22,15 +20,15 @@ import (
func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
// primary performs a lunatic attack
var (
latestHeight = int64(3)
latestHeight = int64(10)
valSize = 5
divergenceHeight = int64(2)
divergenceHeight = int64(6)
primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight)
primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight)
)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime)
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain)
forgedVals := forgedKeys.ToValidators(2, 0)
@@ -44,38 +42,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
nil, forgedVals, forgedVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(forgedKeys))
primaryValidators[height] = forgedVals
}
// never called, delete it to make mockery asserts pass
delete(witnessHeaders, 2)
delete(primaryHeaders, 2)
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstPrimary := &types.LightClientAttackEvidence{
// after the divergence height the valset doesn't change so we expect the evidence to be for the latest height
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[latestHeight],
ValidatorSet: primaryValidators[latestHeight],
},
CommonHeight: 1,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil)
mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstWitness := &types.LightClientAttackEvidence{
// when forming evidence against witness we learn that the canonical chain continued to change validator sets
// hence the conflicting block is at 7
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[divergenceHeight+1],
ValidatorSet: witnessValidators[divergenceHeight+1],
},
CommonHeight: divergenceHeight - 1,
}
return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash())
})).Return(nil)
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
c, err := light.NewClient(
ctx,
@@ -85,134 +52,121 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
mockPrimary,
[]provider.Provider{mockWitness},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, latestHeight, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
// Check evidence was sent to both full nodes.
evAgainstPrimary := &types.LightClientAttackEvidence{
// after the divergence height the valset doesn't change so we expect the evidence to be for height 10
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[10],
ValidatorSet: primaryValidators[10],
},
CommonHeight: 4,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
evAgainstWitness := &types.LightClientAttackEvidence{
// when forming evidence against witness we learn that the canonical chain continued to change validator sets
// hence the conflicting block is at 7
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[7],
ValidatorSet: witnessValidators[7],
},
CommonHeight: 4,
}
assert.True(t, primary.HasEvidence(evAgainstWitness))
}
func TestLightClientAttackEvidence_Equivocation(t *testing.T) {
cases := []struct {
name string
lightOption light.Option
unusedWitnessBlockHeights []int64
unusedPrimaryBlockHeights []int64
latestHeight int64
divergenceHeight int64
}{
{
name: "sequential",
lightOption: light.SequentialVerification(),
unusedWitnessBlockHeights: []int64{4, 6},
latestHeight: int64(5),
divergenceHeight: int64(3),
},
{
name: "skipping",
lightOption: light.SkippingVerification(light.DefaultTrustLevel),
unusedWitnessBlockHeights: []int64{2, 4, 6},
unusedPrimaryBlockHeights: []int64{2, 4, 6},
latestHeight: int64(5),
divergenceHeight: int64(3),
},
verificationOptions := map[string]light.Option{
"sequential": light.SequentialVerification(),
"skipping": light.SkippingVerification(light.DefaultTrustLevel),
}
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
// primary performs an equivocation attack
var (
valSize = 5
primaryHeaders = make(map[int64]*types.SignedHeader, testCase.latestHeight)
// validators don't change in this network (however we still use a map just for convenience)
primaryValidators = make(map[int64]*types.ValidatorSet, testCase.latestHeight)
)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID,
testCase.latestHeight+1, valSize, 2, bTime)
for height := int64(1); height <= testCase.latestHeight; height++ {
if height < testCase.divergenceHeight {
primaryHeaders[height] = witnessHeaders[height]
primaryValidators[height] = witnessValidators[height]
continue
}
// we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for
// a different block (which we do by adding txs)
primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height,
bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")},
witnessValidators[height], witnessValidators[height+1], hash("app_hash"),
hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1)
for s, verificationOption := range verificationOptions {
t.Log("==> verification", s)
// primary performs an equivocation attack
var (
latestHeight = int64(10)
valSize = 5
divergenceHeight = int64(6)
primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight)
primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight)
)
// validators don't change in this network (however we still use a map just for convenience)
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
for height := int64(1); height <= latestHeight; height++ {
if height < divergenceHeight {
primaryHeaders[height] = witnessHeaders[height]
primaryValidators[height] = witnessValidators[height]
continue
}
// we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for
// a different block (which we do by adding txs)
primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height,
bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")},
witnessValidators[height], witnessValidators[height+1], hash("app_hash"),
hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1)
primaryValidators[height] = witnessValidators[height]
}
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
for _, height := range testCase.unusedWitnessBlockHeights {
delete(witnessHeaders, height)
}
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
for _, height := range testCase.unusedPrimaryBlockHeights {
delete(primaryHeaders, height)
}
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 4 * time.Hour,
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
verificationOption,
)
require.NoError(t, err)
// Check evidence was sent to both full nodes.
// Common height should be set to the height of the divergent header in the instance
// of an equivocation attack and the validator sets are the same as what the witness has
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[testCase.divergenceHeight],
ValidatorSet: primaryValidators[testCase.divergenceHeight],
},
CommonHeight: testCase.divergenceHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil)
mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstWitness := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[testCase.divergenceHeight],
ValidatorSet: witnessValidators[testCase.divergenceHeight],
},
CommonHeight: testCase.divergenceHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash())
})).Return(nil)
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 4 * time.Hour,
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
testCase.lightOption,
)
require.NoError(t, err)
// Check evidence was sent to both full nodes.
// Common height should be set to the height of the divergent header in the instance
// of an equivocation attack and the validator sets are the same as what the witness has
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[divergenceHeight],
ValidatorSet: primaryValidators[divergenceHeight],
},
CommonHeight: divergenceHeight,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, testCase.latestHeight, bTime.Add(300*time.Second))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
})
evAgainstWitness := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[divergenceHeight],
ValidatorSet: witnessValidators[divergenceHeight],
},
CommonHeight: divergenceHeight,
}
assert.True(t, primary.HasEvidence(evAgainstWitness))
}
}
@@ -228,10 +182,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
primaryValidators = make(map[int64]*types.ValidatorSet, forgedHeight)
)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime)
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(witnessHeaders, unusedHeader)
}
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime)
// primary has the exact same headers except it forges one extra header in the future using keys from 2/5ths of
// the validators
@@ -239,9 +190,6 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
primaryHeaders[h] = witnessHeaders[h]
primaryValidators[h] = witnessValidators[h]
}
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(primaryHeaders, unusedHeader)
}
forgedKeys := chainKeys[latestHeight].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain)
primaryValidators[forgedHeight] = forgedKeys.ToValidators(2, 0)
primaryHeaders[forgedHeight] = forgedKeys.GenSignedHeader(
@@ -256,36 +204,15 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
hash("results_hash"),
0, len(forgedKeys),
)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
lastBlock, _ := mockPrimary.LightBlock(ctx, forgedHeight)
mockPrimary.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
mockPrimary.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
/*
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(witnessHeaders, unusedHeader)
}
*/
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
lastBlock, _ = mockWitness.LightBlock(ctx, latestHeight)
mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil).Once()
mockWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
// Check evidence was sent to the witness against the full node
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[forgedHeight],
ValidatorSet: primaryValidators[forgedHeight],
},
CommonHeight: latestHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil).Twice()
laggingWitness := witness.Copy("laggingWitness")
// In order to perform the attack, the primary needs at least one accomplice as a witness to also
// send the forged block
accomplice := mockPrimary
accomplice := primary
c, err := light.NewClient(
ctx,
@@ -295,8 +222,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
mockPrimary,
[]provider.Provider{mockWitness, accomplice},
primary,
[]provider.Provider{witness, accomplice},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
@@ -324,7 +251,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
}
go func() {
time.Sleep(2 * time.Second)
mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(newLb, nil)
witness.AddLightBlock(newLb)
}()
// Now assert that verification returns an error. We craft the light clients time to be a little ahead of the chain
@@ -334,19 +261,26 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
// Check evidence was sent to the witness against the full node
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[forgedHeight],
ValidatorSet: primaryValidators[forgedHeight],
},
CommonHeight: latestHeight,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
// We attempt the same call but now the supporting witness has a block which should
// immediately conflict in time with the primary
_, err = c.VerifyLightBlockAtHeight(ctx, forgedHeight, bTime.Add(time.Duration(forgedHeight)*time.Minute))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
// Lastly we test the unfortunate case where the light client's supporting witness doesn't update
// in enough time
mockLaggingWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
mockLaggingWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh)
lastBlock, _ = mockLaggingWitness.LightBlock(ctx, latestHeight)
mockLaggingWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
c, err = light.NewClient(
ctx,
chainID,
@@ -355,8 +289,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
mockPrimary,
[]provider.Provider{mockLaggingWitness, accomplice},
primary,
[]provider.Provider{laggingWitness, accomplice},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
@@ -366,20 +300,17 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
_, err = c.Update(ctx, bTime.Add(time.Duration(forgedHeight)*time.Minute))
assert.NoError(t, err)
mockPrimary.AssertExpectations(t)
mockWitness.AssertExpectations(t)
}
// 1. Different nodes therefore a divergent header is produced.
// => light client returns an error upon creation because primary and witness
// have a different view.
func TestClientDivergentTraces1(t *testing.T) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(headers, vals)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
firstBlock, err := primary.LightBlock(ctx, 1)
require.NoError(t, err)
headers, vals, _ = genLightBlocksWithKeys(chainID, 1, 5, 2, bTime)
mockWitness := mockNodeFromHeadersAndVals(headers, vals)
witness := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
_, err = light.NewClient(
ctx,
@@ -389,25 +320,20 @@ func TestClientDivergentTraces1(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
mockPrimary,
[]provider.Provider{mockWitness},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.Error(t, err)
assert.Contains(t, err.Error(), "does not match primary")
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
// 2. Two out of three nodes don't respond but the third has a header that matches
// => verification should be successful and all the witnesses should remain
func TestClientDivergentTraces2(t *testing.T) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimaryNode := mockNodeFromHeadersAndVals(headers, vals)
mockDeadNode := &provider_mocks.Provider{}
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)
firstBlock, err := mockPrimaryNode.LightBlock(ctx, 1)
primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
firstBlock, err := primary.LightBlock(ctx, 1)
require.NoError(t, err)
c, err := light.NewClient(
ctx,
@@ -417,35 +343,31 @@ func TestClientDivergentTraces2(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
mockPrimaryNode,
[]provider.Provider{mockDeadNode, mockDeadNode, mockPrimaryNode},
primary,
[]provider.Provider{deadNode, deadNode, primary},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
assert.NoError(t, err)
assert.Equal(t, 3, len(c.Witnesses()))
mockDeadNode.AssertExpectations(t)
mockPrimaryNode.AssertExpectations(t)
}
// 3. witness has the same first header, but different second header
// => creation should succeed, but the verification should fail
//nolint: dupl
func TestClientDivergentTraces3(t *testing.T) {
//
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
_, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime)
primary := mockp.New(chainID, primaryHeaders, primaryVals)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
firstBlock, err := primary.LightBlock(ctx, 1)
require.NoError(t, err)
mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
_, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime)
mockHeaders[1] = primaryHeaders[1]
mockVals[1] = primaryVals[1]
mockWitness := mockNodeFromHeadersAndVals(mockHeaders, mockVals)
witness := mockp.New(chainID, mockHeaders, mockVals)
c, err := light.NewClient(
ctx,
@@ -455,35 +377,33 @@ func TestClientDivergentTraces3(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
mockPrimary,
[]provider.Provider{mockWitness},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
assert.Error(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
// 4. Witness has a divergent header but can not produce a valid trace to back it up.
// It should be ignored
//nolint: dupl
func TestClientDivergentTraces4(t *testing.T) {
//
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
_, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime)
primary := mockp.New(chainID, primaryHeaders, primaryVals)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
firstBlock, err := primary.LightBlock(ctx, 1)
require.NoError(t, err)
witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
primaryHeaders[2] = witnessHeaders[2]
primaryVals[2] = witnessVals[2]
mockWitness := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
_, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime)
witness := primary.Copy("witness")
witness.AddLightBlock(&types.LightBlock{
SignedHeader: mockHeaders[10],
ValidatorSet: mockVals[10],
})
c, err := light.NewClient(
ctx,
@@ -493,16 +413,14 @@ func TestClientDivergentTraces4(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
mockPrimary,
[]provider.Provider{mockWitness},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
assert.Error(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}

View File

@@ -2,6 +2,7 @@ package light_test
import (
"context"
"fmt"
"io/ioutil"
stdlog "log"
"os"
@@ -18,22 +19,23 @@ import (
rpctest "github.com/tendermint/tendermint/rpc/test"
)
// Manually getting light blocks and verifying them.
func ExampleClient() {
// Automatically getting new headers and verifying them.
func ExampleClient_Update() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight")
logger := log.TestingLogger()
conf := rpctest.CreateConfig("ExampleClient_Update")
// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
if err != nil {
stdlog.Fatal(err)
}
defer func() { _ = closer(ctx) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
@@ -47,9 +49,83 @@ func ExampleClient() {
stdlog.Fatal(err)
}
block, err := primary.LightBlock(ctx, 2)
if err != nil {
stdlog.Fatal(err)
}
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
if err != nil {
stdlog.Fatal(err)
}
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
if err != nil {
stdlog.Fatal(err)
}
defer func() {
if err := c.Cleanup(); err != nil {
stdlog.Fatal(err)
}
}()
time.Sleep(2 * time.Second)
h, err := c.Update(ctx, time.Now())
if err != nil {
stdlog.Fatal(err)
}
if h != nil && h.Height > 2 {
fmt.Println("successful update")
} else {
fmt.Println("update failed")
}
}
// Manually getting light blocks and verifying them.
func ExampleClient_VerifyLightBlockAtHeight() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight")
// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
if err != nil {
stdlog.Fatal(err)
}
defer func() { _ = closer(ctx) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
}
defer os.RemoveAll(dbDir)
chainID := conf.ChainID()
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
if err != nil {
stdlog.Fatal(err)
}
block, err := primary.LightBlock(ctx, 2)
if err != nil {
stdlog.Fatal(err)
@@ -70,7 +146,7 @@ func ExampleClient() {
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(logger),
light.Logger(log.TestingLogger()),
)
if err != nil {
stdlog.Fatal(err)
@@ -81,26 +157,15 @@ func ExampleClient() {
}
}()
// wait for a few more blocks to be produced
time.Sleep(2 * time.Second)
// verify the block at height 3
_, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now())
if err != nil {
stdlog.Fatal(err)
}
// retrieve light block at height 3
_, err = c.TrustedLightBlock(3)
h, err := c.TrustedLightBlock(3)
if err != nil {
stdlog.Fatal(err)
}
// update to the latest height
lb, err := c.Update(ctx, time.Now())
if err != nil {
stdlog.Fatal(err)
}
logger.Info("verified light block", "light-block", lb)
fmt.Println("got header", h.Height)
}

View File

@@ -3,12 +3,10 @@ package light_test
import (
"time"
"github.com/stretchr/testify/mock"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/tmhash"
tmtime "github.com/tendermint/tendermint/libs/time"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
@@ -171,12 +169,12 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys {
return newKeys.Extend(delta)
}
// genLightBlocksWithKeys generates the header and validator set to create
// blocks to height. BlockIntervals are in per minute.
// Generates the headers and validator sets needed to create a full mock node with blocks up to
// blockSize and with variation in validator sets. Block intervals are one per minute.
// NOTE: Expected to have a large validator set size ~ 100 validators.
func genLightBlocksWithKeys(
func genMockNodeWithKeys(
chainID string,
numBlocks int64,
blockSize int64,
valSize int,
valVariation float32,
bTime time.Time) (
@@ -185,9 +183,9 @@ func genLightBlocksWithKeys(
map[int64]privKeys) {
var (
headers = make(map[int64]*types.SignedHeader, numBlocks)
valset = make(map[int64]*types.ValidatorSet, numBlocks+1)
keymap = make(map[int64]privKeys, numBlocks+1)
headers = make(map[int64]*types.SignedHeader, blockSize)
valset = make(map[int64]*types.ValidatorSet, blockSize+1)
keymap = make(map[int64]privKeys, blockSize+1)
keys = genPrivKeys(valSize)
totalVariation = valVariation
valVariationInt int
@@ -209,7 +207,7 @@ func genLightBlocksWithKeys(
valset[1] = keys.ToValidators(2, 0)
keys = newKeys
for height := int64(2); height <= numBlocks; height++ {
for height := int64(2); height <= blockSize; height++ {
totalVariation += valVariation
valVariationInt = int(totalVariation)
totalVariation = -float32(valVariationInt)
@@ -228,14 +226,17 @@ func genLightBlocksWithKeys(
return headers, valset, keymap
}
func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader,
vals map[int64]*types.ValidatorSet) *provider_mocks.Provider {
mockNode := &provider_mocks.Provider{}
for i, header := range headers {
lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]}
mockNode.On("LightBlock", mock.Anything, i).Return(lb, nil)
}
return mockNode
func genMockNode(
chainID string,
blockSize int64,
valSize int,
valVariation float32,
bTime time.Time) (
string,
map[int64]*types.SignedHeader,
map[int64]*types.ValidatorSet) {
headers, valset, _ := genMockNodeWithKeys(chainID, blockSize, valSize, valVariation, bTime)
return chainID, headers, valset
}
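
genMockNodeWithKeys and genMockNode produce the header/validator-set fixtures that the handwritten mock provider consumes throughout these tests, with valVariation controlling how many validator keys are rotated between blocks. A short sketch of the intended wiring, reusing mockp.New, Copy, and LightBlock as they appear elsewhere in this diff (a fragment meant to sit inside a test body):

```go
// Build a 10-block mock chain with 5 validators, rotating roughly 2 keys per
// block, and use it as both primary and an independent witness copy.
_, headers, vals := genMockNode(chainID, 10, 5, 2, bTime)
primary := mockp.New(chainID, headers, vals)
witness := primary.Copy("witness")
firstBlock, err := primary.LightBlock(ctx, 1) // trust anchor for light.NewClient
_, _, _ = witness, firstBlock, err
```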
func hash(s string) []byte {

View File

@@ -2,7 +2,6 @@ package light_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
@@ -18,7 +17,6 @@ import (
httpp "github.com/tendermint/tendermint/light/provider/http"
dbs "github.com/tendermint/tendermint/light/store/db"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
// NOTE: these are ports of the tests from example_test.go but
@@ -36,11 +34,10 @@ func TestClientIntegration_Update(t *testing.T) {
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
require.NoError(t, err)
defer func() {
fmt.Println("close")
defer func() { require.NoError(t, closer(ctx)) }()
require.NoError(t, closer(ctx))
}()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-test-update-example")
require.NoError(t, err)
@@ -51,8 +48,7 @@ func TestClientIntegration_Update(t *testing.T) {
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)
// give Tendermint time to generate some blocks
block, err := waitForBlock(ctx, primary, 2)
block, err := primary.LightBlock(ctx, 2)
require.NoError(t, err)
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
@@ -75,17 +71,8 @@ func TestClientIntegration_Update(t *testing.T) {
defer func() { require.NoError(t, c.Cleanup()) }()
// ensure Tendermint is at height 3 or higher
_, err = waitForBlock(ctx, primary, 3)
require.NoError(t, err)
for i := 0; i < 100; i++ {
b, pErr := primary.LightBlock(ctx, 0)
if pErr != nil {
fmt.Println("err")
fmt.Println(pErr)
fmt.Println(b)
}
}
time.Sleep(2 * time.Second)
h, err := c.Update(ctx, time.Now())
require.NoError(t, err)
require.NotNil(t, h)
@@ -107,6 +94,9 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
require.NoError(t, err)
defer func() { require.NoError(t, closer(ctx)) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-test-verify-example")
require.NoError(t, err)
defer os.RemoveAll(dbDir)
@@ -116,8 +106,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)
// give Tendermint time to generate some blocks
block, err := waitForBlock(ctx, primary, 2)
block, err := primary.LightBlock(ctx, 2)
require.NoError(t, err)
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
@@ -139,10 +128,6 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
defer func() { require.NoError(t, c.Cleanup()) }()
// ensure Tendermint is at height 3 or higher
_, err = waitForBlock(ctx, primary, 3)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now())
require.NoError(t, err)
@@ -151,23 +136,3 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
require.EqualValues(t, 3, h.Height)
}
func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*types.LightBlock, error) {
for {
block, err := p.LightBlock(ctx, height)
switch err {
case nil:
return block, nil
// node isn't running yet, wait 1 second and repeat
case provider.ErrNoResponse, provider.ErrHeightTooHigh:
timer := time.NewTimer(1 * time.Second)
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-timer.C:
}
default:
return nil, err
}
}
}

View File

@@ -114,7 +114,6 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock,
sh, err := p.signedHeader(ctx, h)
if err != nil {
fmt.Println("nil signed header")
return nil, err
}
@@ -139,9 +138,6 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock,
SignedHeader: sh,
ValidatorSet: vs,
}
if lb.SignedHeader.Commit == nil {
fmt.Println(lb)
}
err = lb.ValidateBasic(p.chainID)
if err != nil {

View File

@@ -0,0 +1,30 @@
package mock
import (
"context"
"fmt"
"github.com/tendermint/tendermint/light/provider"
"github.com/tendermint/tendermint/types"
)
type deadMock struct {
id string
}
// NewDeadMock creates a mock provider that always errors. id is used to distinguish multiple providers.
func NewDeadMock(id string) provider.Provider {
return &deadMock{id: id}
}
func (p *deadMock) String() string {
return fmt.Sprintf("DeadMock-%s", p.id)
}
func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) {
return nil, provider.ErrNoResponse
}
func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error {
return provider.ErrNoResponse
}

125
light/provider/mock/mock.go Normal file
View File

@@ -0,0 +1,125 @@
package mock
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/tendermint/tendermint/light/provider"
"github.com/tendermint/tendermint/types"
)
type Mock struct {
id string
mtx sync.Mutex
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
evidenceToReport map[string]types.Evidence // hash => evidence
latestHeight int64
}
var _ provider.Provider = (*Mock)(nil)
// New creates a mock provider with the given set of headers and validator
// sets.
func New(id string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock {
height := int64(0)
for h := range headers {
if h > height {
height = h
}
}
return &Mock{
id: id,
headers: headers,
vals: vals,
evidenceToReport: make(map[string]types.Evidence),
latestHeight: height,
}
}
func (p *Mock) String() string {
var headers strings.Builder
for _, h := range p.headers {
fmt.Fprintf(&headers, " %d:%X", h.Height, h.Hash())
}
var vals strings.Builder
for _, v := range p.vals {
fmt.Fprintf(&vals, " %X", v.Hash())
}
return fmt.Sprintf("Mock{id: %s, headers: %s, vals: %v}", p.id, headers.String(), vals.String())
}
func (p *Mock) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
p.mtx.Lock()
defer p.mtx.Unlock()
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(10 * time.Millisecond):
}
var lb *types.LightBlock
if height > p.latestHeight {
return nil, provider.ErrHeightTooHigh
}
if height == 0 && len(p.headers) > 0 {
height = p.latestHeight
}
if _, ok := p.headers[height]; ok {
sh := p.headers[height]
vals := p.vals[height]
lb = &types.LightBlock{
SignedHeader: sh,
ValidatorSet: vals,
}
}
if lb == nil {
return nil, provider.ErrLightBlockNotFound
}
if lb.SignedHeader == nil || lb.ValidatorSet == nil {
return nil, provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}
}
if err := lb.ValidateBasic(lb.ChainID); err != nil {
return nil, provider.ErrBadLightBlock{Reason: err}
}
return lb, nil
}
func (p *Mock) ReportEvidence(_ context.Context, ev types.Evidence) error {
p.evidenceToReport[string(ev.Hash())] = ev
return nil
}
func (p *Mock) HasEvidence(ev types.Evidence) bool {
_, ok := p.evidenceToReport[string(ev.Hash())]
return ok
}
func (p *Mock) AddLightBlock(lb *types.LightBlock) {
p.mtx.Lock()
defer p.mtx.Unlock()
if err := lb.ValidateBasic(lb.ChainID); err != nil {
panic(fmt.Sprintf("unable to add light block, err: %v", err))
}
p.headers[lb.Height] = lb.SignedHeader
p.vals[lb.Height] = lb.ValidatorSet
if lb.Height > p.latestHeight {
p.latestHeight = lb.Height
}
}
func (p *Mock) Copy(id string) *Mock {
return New(id, p.headers, p.vals)
}
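Since this hand-written Mock provider replaces the mockery-generated one (deleted in the next file), a short sketch of how a test might drive it. The headers and vals maps are assumed to come from a generator such as genMockNodeWithKeys above:

package mock_test

import (
	"context"
	"errors"
	"testing"

	"github.com/tendermint/tendermint/light/provider"
	"github.com/tendermint/tendermint/light/provider/mock"
	"github.com/tendermint/tendermint/types"
)

// exerciseMock drives the Mock provider through its two main code paths:
// resolving height 0 to the latest known block and rejecting heights beyond
// the latest one with ErrHeightTooHigh.
func exerciseMock(t *testing.T, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) {
	ctx := context.Background()
	p := mock.New("primary", headers, vals)

	lb, err := p.LightBlock(ctx, 0) // height 0 means "latest known height"
	if err != nil {
		t.Fatal(err)
	}

	if _, err := p.LightBlock(ctx, lb.Height+1); !errors.Is(err, provider.ErrHeightTooHigh) {
		t.Fatalf("expected ErrHeightTooHigh, got %v", err)
	}
}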

View File

@@ -1,53 +0,0 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/types"
)
// Provider is an autogenerated mock type for the Provider type
type Provider struct {
mock.Mock
}
// LightBlock provides a mock function with given fields: ctx, height
func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
ret := _m.Called(ctx, height)
var r0 *types.LightBlock
if rf, ok := ret.Get(0).(func(context.Context, int64) *types.LightBlock); ok {
r0 = rf(ctx, height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.LightBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
r1 = rf(ctx, height)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ReportEvidence provides a mock function with given fields: _a0, _a1
func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}

View File

@@ -66,7 +66,7 @@ type nodeImpl struct {
eventBus *types.EventBus // pub/sub for services
stateStore sm.Store
blockStore *store.BlockStore // store the blockchain to disk
bcReactor service.Service // for block-syncing
bcReactor service.Service // for fast-syncing
mempoolReactor service.Service // for gossipping transactions
mempool mempool.Mempool
stateSync bool // whether the node should state sync on startup
@@ -225,9 +225,9 @@ func makeNode(config *cfg.Config,
}
}
// Determine whether we should do block sync. This must happen after the handshake, since the
// Determine whether we should do fast sync. This must happen after the handshake, since the
// app may modify the validator set, specifying ourself as the only validator.
blockSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode)
@@ -281,15 +281,15 @@ func makeNode(config *cfg.Config,
csReactorShim, csReactor, csState := createConsensusReactor(
config, state, blockExec, blockStore, mp, evPool,
privValidator, csMetrics, stateSync || blockSync, eventBus,
privValidator, csMetrics, stateSync || fastSync, eventBus,
peerManager, router, consensusLogger,
)
// Create the blockchain reactor. Note, we do not start block sync if we're
// Create the blockchain reactor. Note, we do not start fast sync if we're
// doing a state sync first.
bcReactorShim, bcReactor, err := createBlockchainReactor(
logger, config, state, blockExec, blockStore, csReactor,
peerManager, router, blockSync && !stateSync, csMetrics,
peerManager, router, fastSync && !stateSync, csMetrics,
)
if err != nil {
return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
@@ -303,16 +303,16 @@ func makeNode(config *cfg.Config,
bcReactorForSwitch = bcReactor.(p2p.Reactor)
}
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
// FIXME We need to update metrics here, since other reactors don't have access to them.
if stateSync {
csMetrics.StateSyncing.Set(1)
} else if blockSync {
csMetrics.BlockSyncing.Set(1)
} else if fastSync {
csMetrics.FastSyncing.Set(1)
}
// Set up state sync reactor, and schedule a sync if requested.
// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
// we should clean this whole thing up. See:
// https://github.com/tendermint/tendermint/issues/4644
var (
@@ -610,7 +610,7 @@ func (n *nodeImpl) OnStart() error {
}
if n.config.Mode != cfg.ModeSeed {
if n.config.BlockSync.Version == cfg.BlockSyncV0 {
if n.config.FastSync.Version == cfg.BlockchainV0 {
// Start the real blockchain reactor separately since the switch uses the shim.
if err := n.bcReactor.Start(); err != nil {
return err
@@ -653,7 +653,7 @@ func (n *nodeImpl) OnStart() error {
// Run state sync
if n.stateSync {
bcR, ok := n.bcReactor.(cs.BlockSyncReactor)
bcR, ok := n.bcReactor.(cs.FastSyncReactor)
if !ok {
return fmt.Errorf("this blockchain reactor does not support switching from state sync")
}
@@ -695,7 +695,7 @@ func (n *nodeImpl) OnStop() {
if n.config.Mode != cfg.ModeSeed {
// now stop the reactors
if n.config.BlockSync.Version == cfg.BlockSyncV0 {
if n.config.FastSync.Version == cfg.BlockchainV0 {
// Stop the real blockchain reactor separately since the switch uses the shim.
if err := n.bcReactor.Stop(); err != nil {
n.Logger.Error("failed to stop the blockchain reactor", "err", err)
@@ -788,8 +788,8 @@ func (n *nodeImpl) ConfigureRPC() (*rpccore.Environment, error) {
Logger: n.Logger.With("module", "rpc"),
Config: *n.config.RPC,
BlockSyncReactor: n.bcReactor.(cs.BlockSyncReactor),
Config: *n.config.RPC,
FastSyncReactor: n.bcReactor.(cs.FastSyncReactor),
}
if n.config.Mode == cfg.ModeValidator {
pubKey, err := n.privValidator.GetPubKey(context.TODO())
@@ -1033,14 +1033,14 @@ func (n *nodeImpl) NodeInfo() types.NodeInfo {
return n.nodeInfo
}
// startStateSync starts an asynchronous state sync process, then switches to block sync mode.
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
func startStateSync(
ssR statesync.SyncReactor,
bcR cs.BlockSyncReactor,
bcR cs.FastSyncReactor,
conR cs.ConsSyncReactor,
sp statesync.StateProvider,
config *cfg.StateSyncConfig,
blockSync bool,
fastSync bool,
stateInitHeight int64,
eb *types.EventBus,
) error {
@@ -1074,17 +1074,17 @@ func startStateSync(
stateSyncLogger.Error("failed to emit the statesync start event", "err", err)
}
if blockSync {
if fastSync {
// FIXME Very ugly to have these metrics bleed through here.
conR.SetBlockSyncingMetrics(1)
if err := bcR.SwitchToBlockSync(state); err != nil {
stateSyncLogger.Error("failed to switch to block sync", "err", err)
conR.SetFastSyncingMetrics(1)
if err := bcR.SwitchToFastSync(state); err != nil {
stateSyncLogger.Error("failed to switch to fast sync", "err", err)
return
}
d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight}
if err := eb.PublishEventBlockSyncStatus(d); err != nil {
stateSyncLogger.Error("failed to emit the block sync starting event", "err", err)
d := types.EventDataFastSyncStatus{Complete: false, Height: state.LastBlockHeight}
if err := eb.PublishEventFastSyncStatus(d); err != nil {
stateSyncLogger.Error("failed to emit the fastsync starting event", "err", err)
}
} else {

View File

@@ -659,7 +659,7 @@ func loadStatefromGenesis(t *testing.T) sm.State {
func TestNodeStartStateSync(t *testing.T) {
mockSSR := &statesync.MockSyncReactor{}
mockFSR := &consmocks.BlockSyncReactor{}
mockFSR := &consmocks.FastSyncReactor{}
mockCSR := &consmocks.ConsSyncReactor{}
mockSP := &ssmocks.StateProvider{}
state := loadStatefromGenesis(t)

View File

@@ -16,8 +16,8 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2"
bcv0 "github.com/tendermint/tendermint/internal/blockchain/v0"
bcv2 "github.com/tendermint/tendermint/internal/blockchain/v2"
cs "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/evidence"
"github.com/tendermint/tendermint/internal/mempool"
@@ -337,14 +337,14 @@ func createBlockchainReactor(
csReactor *cs.Reactor,
peerManager *p2p.PeerManager,
router *p2p.Router,
blockSync bool,
fastSync bool,
metrics *cs.Metrics,
) (*p2p.ReactorShim, service.Service, error) {
logger = logger.With("module", "blockchain")
switch config.BlockSync.Version {
case cfg.BlockSyncV0:
switch config.FastSync.Version {
case cfg.BlockchainV0:
reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)
var (
@@ -362,7 +362,7 @@ func createBlockchainReactor(
reactor, err := bcv0.NewReactor(
logger, state.Copy(), blockExec, blockStore, csReactor,
channels[bcv0.BlockchainChannel], peerUpdates, blockSync,
channels[bcv0.BlockchainChannel], peerUpdates, fastSync,
metrics,
)
if err != nil {
@@ -371,11 +371,11 @@ func createBlockchainReactor(
return reactorShim, reactor, nil
case cfg.BlockSyncV2:
return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0")
case cfg.BlockchainV2:
return nil, nil, errors.New("fastsync version v2 is no longer supported. Please use v0")
default:
return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version)
return nil, nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
}
}
@@ -725,15 +725,15 @@ func makeNodeInfo(
}
var bcChannel byte
switch config.BlockSync.Version {
case cfg.BlockSyncV0:
switch config.FastSync.Version {
case cfg.BlockchainV0:
bcChannel = byte(bcv0.BlockchainChannel)
case cfg.BlockSyncV2:
case cfg.BlockchainV2:
bcChannel = bcv2.BlockchainChannel
default:
return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version)
return types.NodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
}
nodeInfo := types.NodeInfo{

View File

@@ -186,20 +186,17 @@ message ResponseBeginBlock {
}
message ResponseCheckTx {
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5 [json_name = "gas_wanted"];
int64 gas_used = 6 [json_name = "gas_used"];
repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
string codespace = 8;
string sender = 9;
int64 priority = 10;
// mempool_error is set by Tendermint.
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
string mempool_error = 11;
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5 [json_name = "gas_wanted"];
int64 gas_used = 6 [json_name = "gas_used"];
repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
string codespace = 8;
string sender = 9;
int64 priority = 10;
string mempool_error = 11;
}
message ResponseDeliverTx {
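The comment removed above documented the intent of mempool_error: it is populated by Tendermint itself (for example when a transaction cannot be added to the mempool), never by the application. As a hedged illustration, a caller-side check of the generated response type might look like the sketch below; the Go field name MempoolError is assumed from the usual gogoproto field mapping:

package abcisketch

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// checkTxAccepted distinguishes a Tendermint-side mempool rejection from an
// application-side rejection of a CheckTx response.
func checkTxAccepted(res *abci.ResponseCheckTx) error {
	if res.MempoolError != "" { // set by Tendermint, not by the ABCI application
		return fmt.Errorf("mempool rejected tx: %s", res.MempoolError)
	}
	if res.Code != abci.CodeTypeOK {
		return fmt.Errorf("application rejected tx: code=%d log=%q", res.Code, res.Log)
	}
	return nil
}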

View File

@@ -1,4 +1,4 @@
package blocksync
package blockchain
import (
"errors"

View File

@@ -1,4 +1,4 @@
package blocksync_test
package blockchain_test
import (
"encoding/hex"
@@ -8,7 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: tendermint/blocksync/types.proto
// source: tendermint/blockchain/types.proto
package blocksync
package blockchain
import (
fmt "fmt"
@@ -32,7 +32,7 @@ func (m *BlockRequest) Reset() { *m = BlockRequest{} }
func (m *BlockRequest) String() string { return proto.CompactTextString(m) }
func (*BlockRequest) ProtoMessage() {}
func (*BlockRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{0}
return fileDescriptor_2927480384e78499, []int{0}
}
func (m *BlockRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -77,7 +77,7 @@ func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} }
func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) }
func (*NoBlockResponse) ProtoMessage() {}
func (*NoBlockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{1}
return fileDescriptor_2927480384e78499, []int{1}
}
func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -122,7 +122,7 @@ func (m *BlockResponse) Reset() { *m = BlockResponse{} }
func (m *BlockResponse) String() string { return proto.CompactTextString(m) }
func (*BlockResponse) ProtoMessage() {}
func (*BlockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{2}
return fileDescriptor_2927480384e78499, []int{2}
}
func (m *BlockResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -166,7 +166,7 @@ func (m *StatusRequest) Reset() { *m = StatusRequest{} }
func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
func (*StatusRequest) ProtoMessage() {}
func (*StatusRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{3}
return fileDescriptor_2927480384e78499, []int{3}
}
func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -205,7 +205,7 @@ func (m *StatusResponse) Reset() { *m = StatusResponse{} }
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
func (*StatusResponse) ProtoMessage() {}
func (*StatusResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{4}
return fileDescriptor_2927480384e78499, []int{4}
}
func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -262,7 +262,7 @@ func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_19b397c236e0fa07, []int{5}
return fileDescriptor_2927480384e78499, []int{5}
}
func (m *Message) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -373,41 +373,42 @@ func (*Message) XXX_OneofWrappers() []interface{} {
}
func init() {
proto.RegisterType((*BlockRequest)(nil), "tendermint.blocksync.BlockRequest")
proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blocksync.NoBlockResponse")
proto.RegisterType((*BlockResponse)(nil), "tendermint.blocksync.BlockResponse")
proto.RegisterType((*StatusRequest)(nil), "tendermint.blocksync.StatusRequest")
proto.RegisterType((*StatusResponse)(nil), "tendermint.blocksync.StatusResponse")
proto.RegisterType((*Message)(nil), "tendermint.blocksync.Message")
proto.RegisterType((*BlockRequest)(nil), "tendermint.blockchain.BlockRequest")
proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blockchain.NoBlockResponse")
proto.RegisterType((*BlockResponse)(nil), "tendermint.blockchain.BlockResponse")
proto.RegisterType((*StatusRequest)(nil), "tendermint.blockchain.StatusRequest")
proto.RegisterType((*StatusResponse)(nil), "tendermint.blockchain.StatusResponse")
proto.RegisterType((*Message)(nil), "tendermint.blockchain.Message")
}
func init() { proto.RegisterFile("tendermint/blocksync/types.proto", fileDescriptor_19b397c236e0fa07) }
func init() { proto.RegisterFile("tendermint/blockchain/types.proto", fileDescriptor_2927480384e78499) }
var fileDescriptor_19b397c236e0fa07 = []byte{
// 368 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x4f, 0xfa, 0x40,
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x1b, 0xa3, 0xc4, 0x98, 0x86, 0xd4, 0x97,
0xe8, 0xc1, 0x36, 0xc1, 0xa3, 0xc6, 0x03, 0x27, 0x4c, 0x7c, 0x49, 0x4a, 0xbc, 0x78, 0x21, 0x14,
0x37, 0x40, 0x94, 0x2e, 0x32, 0xdb, 0x03, 0xdf, 0xc2, 0x2f, 0xe0, 0xf7, 0xf1, 0xc8, 0xd1, 0xa3,
0x81, 0x2f, 0x62, 0x98, 0x2d, 0x65, 0x69, 0xb0, 0xb7, 0xdd, 0xe9, 0x33, 0xbf, 0x79, 0xfa, 0x64,
0x16, 0xea, 0x82, 0x45, 0x2f, 0x6c, 0x32, 0x1a, 0x46, 0xc2, 0x0f, 0xdf, 0x78, 0xef, 0x15, 0xa7,
0x51, 0xcf, 0x17, 0xd3, 0x31, 0x43, 0x6f, 0x3c, 0xe1, 0x82, 0xdb, 0xbb, 0x6b, 0x85, 0x97, 0x2a,
0x0e, 0x0e, 0x95, 0x3e, 0x52, 0xcb, 0x6e, 0xd9, 0xe3, 0x9e, 0x42, 0xa5, 0xb9, 0xbc, 0x06, 0xec,
0x3d, 0x66, 0x28, 0xec, 0x3d, 0x28, 0x0d, 0xd8, 0xb0, 0x3f, 0x10, 0x35, 0xbd, 0xae, 0x9f, 0x19,
0x41, 0x72, 0x73, 0xcf, 0xc1, 0x7a, 0xe0, 0x89, 0x12, 0xc7, 0x3c, 0x42, 0xf6, 0xa7, 0xf4, 0x06,
0xcc, 0x4d, 0xe1, 0x05, 0x14, 0x69, 0x24, 0xe9, 0xca, 0x8d, 0x7d, 0x4f, 0xf1, 0x29, 0xfd, 0x4b,
0xbd, 0x54, 0xb9, 0x16, 0x98, 0x6d, 0xd1, 0x15, 0x31, 0x26, 0x9e, 0xdc, 0x6b, 0xa8, 0xae, 0x0a,
0xf9, 0xa3, 0x6d, 0x1b, 0x0a, 0x61, 0x17, 0x59, 0xed, 0x1f, 0x55, 0xe9, 0xec, 0x7e, 0x1a, 0xf0,
0xff, 0x9e, 0x21, 0x76, 0xfb, 0xcc, 0xbe, 0x05, 0x93, 0x66, 0x74, 0x26, 0x12, 0x9d, 0x38, 0x72,
0xbd, 0x6d, 0xc9, 0x79, 0x6a, 0x30, 0x2d, 0x2d, 0xa8, 0x84, 0x6a, 0x50, 0x6d, 0xd8, 0x89, 0x78,
0x67, 0x45, 0x93, 0xbe, 0x68, 0x6e, 0xb9, 0x71, 0xb2, 0x1d, 0x97, 0xc9, 0xaf, 0xa5, 0x05, 0x56,
0x94, 0x89, 0xf4, 0x0e, 0xaa, 0x19, 0xa2, 0x41, 0xc4, 0xa3, 0x5c, 0x83, 0x29, 0xcf, 0x0c, 0xb3,
0x34, 0xa4, 0xdc, 0xd2, 0xdf, 0x2d, 0xe4, 0xd1, 0x36, 0x42, 0x5f, 0xd2, 0x50, 0x2d, 0xd8, 0x8f,
0x60, 0xa5, 0xb4, 0xc4, 0x5c, 0x91, 0x70, 0xc7, 0xf9, 0xb8, 0xd4, 0x5d, 0x15, 0x37, 0x2a, 0xcd,
0x22, 0x18, 0x18, 0x8f, 0x9a, 0x4f, 0x5f, 0x73, 0x47, 0x9f, 0xcd, 0x1d, 0xfd, 0x67, 0xee, 0xe8,
0x1f, 0x0b, 0x47, 0x9b, 0x2d, 0x1c, 0xed, 0x7b, 0xe1, 0x68, 0xcf, 0x57, 0xfd, 0xa1, 0x18, 0xc4,
0xa1, 0xd7, 0xe3, 0x23, 0x5f, 0x5d, 0xe2, 0xf5, 0x91, 0x76, 0xd8, 0xdf, 0xf6, 0x30, 0xc2, 0x12,
0x7d, 0xbb, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1c, 0xa3, 0x45, 0x37, 0x03, 0x00, 0x00,
var fileDescriptor_2927480384e78499 = []byte{
// 370 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xc1, 0x4e, 0xfa, 0x40,
0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x9b, 0xa8, 0xc4, 0x98, 0x46, 0xab, 0x12,
0x3d, 0xd8, 0x26, 0x78, 0x25, 0x1e, 0x38, 0x11, 0x13, 0x8c, 0xa9, 0xc6, 0x83, 0x17, 0xd2, 0xe2,
0x86, 0x36, 0x4a, 0x17, 0xd9, 0xed, 0xc1, 0xb7, 0xf0, 0x19, 0x7c, 0x1a, 0x8f, 0x1c, 0x3d, 0x1a,
0x78, 0x11, 0xc3, 0x6c, 0x29, 0x4b, 0x03, 0xf5, 0xb6, 0x3b, 0xfd, 0xe6, 0x37, 0xdf, 0x7e, 0x99,
0xc2, 0x31, 0x27, 0xf1, 0x33, 0x99, 0x8c, 0xa2, 0x98, 0xbb, 0xc1, 0x2b, 0x1d, 0xbc, 0x0c, 0x42,
0x3f, 0x8a, 0x5d, 0xfe, 0x3e, 0x26, 0xcc, 0x19, 0x4f, 0x28, 0xa7, 0xe6, 0xee, 0x4a, 0xe2, 0xac,
0x24, 0x07, 0x87, 0x52, 0x27, 0xca, 0x45, 0xbf, 0x68, 0xb2, 0x9b, 0x50, 0xeb, 0x2c, 0xae, 0x1e,
0x79, 0x4b, 0x08, 0xe3, 0xe6, 0x1e, 0x54, 0x42, 0x12, 0x0d, 0x43, 0xde, 0x50, 0x8f, 0xd4, 0x73,
0xcd, 0x4b, 0x6f, 0xf6, 0x05, 0x18, 0xb7, 0x34, 0x55, 0xb2, 0x31, 0x8d, 0x19, 0xd9, 0x2a, 0xbd,
0x06, 0x7d, 0x5d, 0x78, 0x09, 0x65, 0x1c, 0x89, 0xba, 0x6a, 0x6b, 0xdf, 0x91, 0x8c, 0x8a, 0x07,
0x08, 0xbd, 0x50, 0xd9, 0x06, 0xe8, 0xf7, 0xdc, 0xe7, 0x09, 0x4b, 0x3d, 0xd9, 0x6d, 0xa8, 0x2f,
0x0b, 0xc5, 0xa3, 0x4d, 0x13, 0x4a, 0x81, 0xcf, 0x48, 0xe3, 0x1f, 0x56, 0xf1, 0x6c, 0x7f, 0x6a,
0xf0, 0xbf, 0x47, 0x18, 0xf3, 0x87, 0xc4, 0xbc, 0x01, 0x1d, 0x67, 0xf4, 0x27, 0x02, 0x9d, 0x3a,
0x3a, 0x71, 0x36, 0x46, 0xe7, 0xc8, 0xc9, 0x74, 0x15, 0xaf, 0x16, 0xc8, 0x49, 0x3d, 0xc0, 0x4e,
0x4c, 0xfb, 0x4b, 0x9c, 0x30, 0x86, 0x83, 0xab, 0xad, 0xe6, 0x16, 0x5e, 0x2e, 0xc1, 0xae, 0xe2,
0x19, 0x71, 0x2e, 0xd4, 0x1e, 0xd4, 0x73, 0x48, 0x0d, 0x91, 0xa7, 0xc5, 0x16, 0x33, 0xa0, 0x1e,
0xe4, 0x71, 0x0c, 0xa3, 0xcb, 0x5e, 0x5c, 0x2a, 0xc4, 0xad, 0x05, 0xbf, 0xc0, 0x31, 0xb9, 0x60,
0xde, 0x81, 0x91, 0xe1, 0x52, 0x7b, 0x65, 0xe4, 0x9d, 0xfd, 0xc1, 0xcb, 0xfc, 0xd5, 0xd9, 0x5a,
0xa5, 0x53, 0x06, 0x8d, 0x25, 0xa3, 0xce, 0xe3, 0xd7, 0xcc, 0x52, 0xa7, 0x33, 0x4b, 0xfd, 0x99,
0x59, 0xea, 0xc7, 0xdc, 0x52, 0xa6, 0x73, 0x4b, 0xf9, 0x9e, 0x5b, 0xca, 0x53, 0x7b, 0x18, 0xf1,
0x30, 0x09, 0x9c, 0x01, 0x1d, 0xb9, 0xf2, 0x26, 0xaf, 0x8e, 0xb8, 0xc8, 0xee, 0xc6, 0xff, 0x23,
0xa8, 0xe0, 0xc7, 0xab, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x59, 0x07, 0xbd, 0x3f, 0x03,
0x00, 0x00,
}
func (m *BlockRequest) Marshal() (dAtA []byte, err error) {

View File

@@ -1,7 +1,7 @@
syntax = "proto3";
package tendermint.blocksync;
package tendermint.blockchain;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync";
option go_package = "github.com/tendermint/tendermint/proto/tendermint/blockchain";
import "tendermint/types/block.proto";

View File

@@ -136,8 +136,6 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
// use a non-canonical commit
if height == env.BlockStore.Height() {
commit := env.BlockStore.LoadSeenCommit(height)
fmt.Println("not commited height")
fmt.Println(commit)
return ctypes.NewResultCommit(&header, commit, false), nil
}

View File

@@ -81,7 +81,7 @@ type Environment struct {
ConsensusReactor *consensus.Reactor
EventBus *types.EventBus // thread safe
Mempool mempl.Mempool
BlockSyncReactor consensus.BlockSyncReactor
FastSyncReactor consensus.FastSyncReactor
Logger log.Logger

View File

@@ -69,10 +69,10 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err
EarliestAppHash: earliestAppHash,
EarliestBlockHeight: earliestBlockHeight,
EarliestBlockTime: time.Unix(0, earliestBlockTimeNano),
MaxPeerBlockHeight: env.BlockSyncReactor.GetMaxPeerBlockHeight(),
MaxPeerBlockHeight: env.FastSyncReactor.GetMaxPeerBlockHeight(),
CatchingUp: env.ConsensusReactor.WaitSync(),
TotalSyncedTime: env.BlockSyncReactor.GetTotalSyncedTime(),
RemainingTime: env.BlockSyncReactor.GetRemainingSyncTime(),
TotalSyncedTime: env.FastSyncReactor.GetTotalSyncedTime(),
RemainingTime: env.FastSyncReactor.GetRemainingSyncTime(),
},
ValidatorInfo: validatorInfo,
}

View File

@@ -16,8 +16,6 @@ const (
PSQL EventSinkType = "psql"
)
//go:generate mockery --case underscore --name EventSink
// The EventSink interface defines the APIs for the IndexerService to interact with the data store,
// including the block/transaction indexing and the search functions.
//
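The comment above describes the EventSink abstraction whose mockery directive is being dropped; its full method set can be read off the generated mock that this change deletes further below. A reconstructed sketch of the interface, for orientation only, written as if it sat in the same package as the hunk above (the authoritative definition lives in state/indexer, and EventSinkType is the string type declared at the top of this file):

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/pubsub/query"
	"github.com/tendermint/tendermint/types"
)

// EventSink, as reconstructed from the generated mock: indexing entry points
// for block and tx events, the matching search/lookup calls, plus type and
// lifecycle metadata.
type EventSink interface {
	Type() EventSinkType
	IndexBlockEvents(types.EventDataNewBlockHeader) error
	IndexTxEvents([]*abci.TxResult) error
	SearchBlockEvents(context.Context, *query.Query) ([]int64, error)
	SearchTxEvents(context.Context, *query.Query) ([]*abci.TxResult, error)
	GetTxByHash([]byte) (*abci.TxResult, error)
	HasBlock(int64) (bool, error)
	Stop() error
}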

View File

@@ -1,194 +0,0 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/types"
)
// BlockStore is an autogenerated mock type for the BlockStore type
type BlockStore struct {
mock.Mock
}
// Base provides a mock function with given fields:
func (_m *BlockStore) Base() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// Height provides a mock function with given fields:
func (_m *BlockStore) Height() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// LoadBaseMeta provides a mock function with given fields:
func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta {
ret := _m.Called()
var r0 *types.BlockMeta
if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.BlockMeta)
}
}
return r0
}
// LoadBlock provides a mock function with given fields: height
func (_m *BlockStore) LoadBlock(height int64) *types.Block {
ret := _m.Called(height)
var r0 *types.Block
if rf, ok := ret.Get(0).(func(int64) *types.Block); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Block)
}
}
return r0
}
// LoadBlockByHash provides a mock function with given fields: hash
func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
ret := _m.Called(hash)
var r0 *types.Block
if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok {
r0 = rf(hash)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Block)
}
}
return r0
}
// LoadBlockCommit provides a mock function with given fields: height
func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit {
ret := _m.Called(height)
var r0 *types.Commit
if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Commit)
}
}
return r0
}
// LoadBlockMeta provides a mock function with given fields: height
func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
ret := _m.Called(height)
var r0 *types.BlockMeta
if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.BlockMeta)
}
}
return r0
}
// LoadBlockPart provides a mock function with given fields: height, index
func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
ret := _m.Called(height, index)
var r0 *types.Part
if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok {
r0 = rf(height, index)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Part)
}
}
return r0
}
// LoadSeenCommit provides a mock function with given fields: height
func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit {
ret := _m.Called(height)
var r0 *types.Commit
if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Commit)
}
}
return r0
}
// PruneBlocks provides a mock function with given fields: height
func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) {
ret := _m.Called(height)
var r0 uint64
if rf, ok := ret.Get(0).(func(int64) uint64); ok {
r0 = rf(height)
} else {
r0 = ret.Get(0).(uint64)
}
var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(height)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SaveBlock provides a mock function with given fields: block, blockParts, seenCommit
func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
_m.Called(block, blockParts, seenCommit)
}
// Size provides a mock function with given fields:
func (_m *BlockStore) Size() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}

View File

@@ -1,167 +0,0 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
indexer "github.com/tendermint/tendermint/state/indexer"
query "github.com/tendermint/tendermint/libs/pubsub/query"
tenderminttypes "github.com/tendermint/tendermint/types"
types "github.com/tendermint/tendermint/abci/types"
)
// EventSink is an autogenerated mock type for the EventSink type
type EventSink struct {
mock.Mock
}
// GetTxByHash provides a mock function with given fields: _a0
func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) {
ret := _m.Called(_a0)
var r0 *types.TxResult
if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.TxResult)
}
}
var r1 error
if rf, ok := ret.Get(1).(func([]byte) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HasBlock provides a mock function with given fields: _a0
func (_m *EventSink) HasBlock(_a0 int64) (bool, error) {
ret := _m.Called(_a0)
var r0 bool
if rf, ok := ret.Get(0).(func(int64) bool); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IndexBlockEvents provides a mock function with given fields: _a0
func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeader) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(tenderminttypes.EventDataNewBlockHeader) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// IndexTxEvents provides a mock function with given fields: _a0
func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// SearchBlockEvents provides a mock function with given fields: _a0, _a1
func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([]int64, error) {
ret := _m.Called(_a0, _a1)
var r0 []int64
if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []int64); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SearchTxEvents provides a mock function with given fields: _a0, _a1
func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*types.TxResult, error) {
ret := _m.Called(_a0, _a1)
var r0 []*types.TxResult
if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*types.TxResult)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Stop provides a mock function with given fields:
func (_m *EventSink) Stop() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// Type provides a mock function with given fields:
func (_m *EventSink) Type() indexer.EventSinkType {
ret := _m.Called()
var r0 indexer.EventSinkType
if rf, ok := ret.Get(0).(func() indexer.EventSinkType); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(indexer.EventSinkType)
}
return r0
}

View File

@@ -9,8 +9,6 @@ import (
// NOTE: Interfaces used by RPC must be thread safe!
//------------------------------------------------------
//go:generate mockery --case underscore --name BlockStore
//------------------------------------------------------
// blockstore

View File

@@ -268,7 +268,6 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
panic(err)
}
if len(bz) == 0 {
fmt.Println("nil bytes")
return nil
}
err = proto.Unmarshal(bz, pbc)

View File

@@ -23,7 +23,7 @@ The above should hold for any arbitrary, valid network configuration, and that c
A testnet configuration is specified as a TOML testnet manifest (see below). The testnet runner uses the manifest to configure a set of Docker containers and start them in some order. The manifests can be written manually (to test specific configurations) or generated randomly by the testnet generator (to test a wide range of configuration permutations).
When running a testnet, the runner will first start the Docker nodes in some sequence, submit random transactions, and wait for the nodes to come online and the first blocks to be produced. This may involve e.g. waiting for nodes to block sync and/or state sync. If specified, it will then run any misbehaviors (e.g. double-signing) and perturbations (e.g. killing or disconnecting nodes). It then waits for the testnet to stabilize, with all nodes online and having reached the latest height.
When running a testnet, the runner will first start the Docker nodes in some sequence, submit random transactions, and wait for the nodes to come online and the first blocks to be produced. This may involve e.g. waiting for nodes to fast sync and/or state sync. If specified, it will then run any misbehaviors (e.g. double-signing) and perturbations (e.g. killing or disconnecting nodes). It then waits for the testnet to stabilize, with all nodes online and having reached the latest height.
Once the testnet stabilizes, a set of Go end-to-end tests are run against the live testnet to verify network invariants (for example that blocks are identical across nodes). These use the RPC client to interact with the network, and should consider the entire network as a black box (i.e. it should not test any network or node internals, only externally visible behavior via RPC). The tests may use the `testNode()` helper to run parallel tests against each individual testnet node, and/or inspect the full blockchain history via `fetchBlockChain()`.
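For orientation, a minimal, made-up manifest in the same TOML format as the sample changed further below (only a few of the available fields are shown; node names and heights are illustrative):

[node.seed01]
mode = "seed"

[node.validator01]
seeds = ["seed01"]
perturb = ["restart"]

[node.full01]
mode = "full"
start_at = 1010
fast_sync = "v0"
state_sync = true
seeds = ["seed01"]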

View File

@@ -30,7 +30,7 @@ var (
nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "grpc"}
nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp", "grpc"}
// FIXME: v2 disabled due to flake
nodeBlockSyncs = uniformChoice{"v0"} // "v2"
nodeFastSyncs = uniformChoice{"v0"} // "v2"
nodeMempools = uniformChoice{"v0", "v1"}
nodeStateSyncs = uniformChoice{false, true}
nodePersistIntervals = uniformChoice{0, 1, 5}
@@ -273,7 +273,7 @@ func generateNode(
Database: nodeDatabases.Choose(r).(string),
ABCIProtocol: nodeABCIProtocols.Choose(r).(string),
PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string),
BlockSync: nodeBlockSyncs.Choose(r).(string),
FastSync: nodeFastSyncs.Choose(r).(string),
Mempool: nodeMempools.Choose(r).(string),
StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0,
PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))),
@@ -311,7 +311,7 @@ func generateNode(
}
if node.StateSync {
node.BlockSync = "v0"
node.FastSync = "v0"
}
return &node

View File

@@ -30,6 +30,11 @@ validator05 = 50
[node.seed01]
mode = "seed"
perturb = ["restart"]
seeds = ["seed02"]
[node.seed02]
mode = "seed"
seeds = ["seed01"]
[node.validator01]
perturb = ["disconnect"]
@@ -42,7 +47,7 @@ database = "boltdb"
persist_interval = 0
perturb = ["restart"]
privval_protocol = "tcp"
seeds = ["seed01"]
seeds = ["seed02"]
[node.validator03]
database = "badgerdb"
@@ -61,21 +66,29 @@ perturb = ["pause"]
[node.validator05]
database = "cleveldb"
block_sync = "v0"
seeds = ["seed01"]
fast_sync = "v0"
seeds = ["seed02"]
start_at = 1005 # Becomes part of the validator set at 1010
abci_protocol = "grpc"
perturb = ["pause", "disconnect", "restart"]
perturb = ["kill", "pause", "disconnect", "restart"]
privval_protocol = "tcp"
[node.full01]
mode = "full"
start_at = 1010
# FIXME: should be v2, disabled due to flake
block_sync = "v0"
fast_sync = "v0"
persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"]
perturb = ["restart"]
retain_blocks = 7
[node.full02]
mode = "full"
start_at = 1015
# FIXME: should be v2, disabled due to flake
fast_sync = "v0"
perturb = ["restart"]
seeds = ["seed01"]
state_sync = true
[node.light01]

View File

@@ -106,9 +106,9 @@ type ManifestNode struct {
// runner will wait for the network to reach at least this block height.
StartAt int64 `toml:"start_at"`
// BlockSync specifies the block sync mode: "" (disable), "v0" or "v2".
// FastSync specifies the fast sync mode: "" (disable), "v0" or "v2".
// Defaults to disabled.
BlockSync string `toml:"block_sync"`
FastSync string `toml:"fast_sync"`
// Mempool specifies which version of mempool to use. Either "v0" or "v1"
Mempool string `toml:"mempool_version"`

View File

@@ -79,7 +79,7 @@ type Node struct {
IP net.IP
ProxyPort uint32
StartAt int64
BlockSync string
FastSync string
Mempool string
StateSync bool
Database string
@@ -168,7 +168,7 @@ func LoadTestnet(file string) (*Testnet, error) {
ABCIProtocol: ProtocolBuiltin,
PrivvalProtocol: ProtocolFile,
StartAt: nodeManifest.StartAt,
BlockSync: nodeManifest.BlockSync,
FastSync: nodeManifest.FastSync,
Mempool: nodeManifest.Mempool,
StateSync: nodeManifest.StateSync,
PersistInterval: 1,
@@ -328,10 +328,10 @@ func (n Node) Validate(testnet Testnet) error {
}
}
}
switch n.BlockSync {
switch n.FastSync {
case "", "v0", "v2":
default:
return fmt.Errorf("invalid block sync setting %q", n.BlockSync)
return fmt.Errorf("invalid fast sync setting %q", n.FastSync)
}
switch n.Mempool {
case "", "v0", "v1":

View File

@@ -296,10 +296,10 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
cfg.Mempool.Version = node.Mempool
}
if node.BlockSync == "" {
if node.FastSync == "" {
cfg.FastSyncMode = false
} else {
cfg.BlockSync.Version = node.BlockSync
cfg.FastSync.Version = node.FastSync
}
if node.StateSync {

View File

@@ -153,8 +153,8 @@ func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error {
return b.Publish(EventValidBlockValue, data)
}
func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error {
return b.Publish(EventBlockSyncStatusValue, data)
func (b *EventBus) PublishEventFastSyncStatus(data EventDataFastSyncStatus) error {
return b.Publish(EventFastSyncStatusValue, data)
}
func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error {
@@ -317,7 +317,7 @@ func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd
return nil
}
func (NopEventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error {
func (NopEventBus) PublishEventFastSyncStatus(data EventDataFastSyncStatus) error {
return nil
}

View File

@@ -370,7 +370,7 @@ func TestEventBusPublish(t *testing.T) {
require.NoError(t, err)
err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{})
require.NoError(t, err)
err = eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{})
err = eventBus.PublishEventFastSyncStatus(EventDataFastSyncStatus{})
require.NoError(t, err)
err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{})
require.NoError(t, err)
@@ -480,7 +480,7 @@ var events = []string{
EventRelockValue,
EventTimeoutWaitValue,
EventVoteValue,
EventBlockSyncStatusValue,
EventFastSyncStatusValue,
EventStateSyncStatusValue,
}
@@ -502,9 +502,7 @@ var queries = []tmpubsub.Query{
EventQueryRelock,
EventQueryTimeoutWait,
EventQueryVote,
EventQueryBlockSyncStatus,
EventQueryStateSyncStatus,
}
EventQueryFastSyncStatus}
func randQuery() tmpubsub.Query {
return queries[mrand.Intn(len(queries))]

View File

@@ -27,9 +27,9 @@ const (
// These are used for testing the consensus state machine.
// They can also be used to build real-time consensus visualizers.
EventCompleteProposalValue = "CompleteProposal"
// The BlockSyncStatus event will be emitted when the node switches its
// sync mechanism between the consensus reactor and the blocksync reactor.
EventBlockSyncStatusValue = "BlockSyncStatus"
// The FastSyncStatus event will be emitted when the node switches its
// sync mechanism between the consensus reactor and the fastsync reactor.
EventFastSyncStatusValue = "FastSyncStatus"
EventLockValue = "Lock"
EventNewRoundValue = "NewRound"
EventNewRoundStepValue = "NewRoundStep"
@@ -104,7 +104,7 @@ func init() {
tmjson.RegisterType(EventDataVote{}, "tendermint/event/Vote")
tmjson.RegisterType(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates")
tmjson.RegisterType(EventDataString(""), "tendermint/event/ProposalString")
tmjson.RegisterType(EventDataBlockSyncStatus{}, "tendermint/event/FastSyncStatus")
tmjson.RegisterType(EventDataFastSyncStatus{}, "tendermint/event/FastSyncStatus")
tmjson.RegisterType(EventDataStateSyncStatus{}, "tendermint/event/StateSyncStatus")
}
@@ -176,9 +176,9 @@ type EventDataValidatorSetUpdates struct {
ValidatorUpdates []*Validator `json:"validator_updates"`
}
// EventDataBlockSyncStatus shows the fastsync status and the
// EventDataFastSyncStatus shows the fastsync status and the
// height when the node's sync mechanism changes.
type EventDataBlockSyncStatus struct {
type EventDataFastSyncStatus struct {
Complete bool `json:"complete"`
Height int64 `json:"height"`
}
@@ -227,7 +227,7 @@ var (
EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdatesValue)
EventQueryValidBlock = QueryForEvent(EventValidBlockValue)
EventQueryVote = QueryForEvent(EventVoteValue)
EventQueryBlockSyncStatus = QueryForEvent(EventBlockSyncStatusValue)
EventQueryFastSyncStatus = QueryForEvent(EventFastSyncStatusValue)
EventQueryStateSyncStatus = QueryForEvent(EventStateSyncStatusValue)
)
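As a consumer-side illustration of the renamed event, the sketch below handles the event data only; how the message is delivered (for example via an EventBus subscription on EventQueryFastSyncStatus) is assumed and not shown:

package eventsketch

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// handleFastSyncStatus inspects data published via PublishEventFastSyncStatus,
// using only the Complete and Height fields defined above.
func handleFastSyncStatus(data interface{}) {
	status, ok := data.(types.EventDataFastSyncStatus)
	if !ok {
		return
	}
	if status.Complete {
		fmt.Printf("fast sync completed at height %d\n", status.Height)
	} else {
		fmt.Printf("fast sync starting from height %d\n", status.Height)
	}
}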