mirror of https://github.com/tendermint/tendermint.git

Merge remote-tracking branch 'origin/master' into wb/proposer-based-timestamps
@@ -7,115 +7,142 @@ import (
 	"math"
 	"net"
 	"os"
 	"syscall"
 	"testing"
 	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	dbm "github.com/tendermint/tm-db"

+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/crypto/tmhash"

 	"github.com/tendermint/tendermint/internal/evidence"
 	"github.com/tendermint/tendermint/internal/mempool"
-	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
+	"github.com/tendermint/tendermint/internal/proxy"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/state/indexer"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/libs/pubsub"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	"github.com/tendermint/tendermint/libs/service"
 	tmtime "github.com/tendermint/tendermint/libs/time"
 	"github.com/tendermint/tendermint/privval"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/state/indexer"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )

 func TestNodeStartStop(t *testing.T) {
-	config := cfg.ResetTestRoot("node_node_test")
+	cfg, err := config.ResetTestRoot("node_node_test")
+	require.NoError(t, err)

-	defer os.RemoveAll(config.RootDir)
+	defer os.RemoveAll(cfg.RootDir)
+
+	ctx, bcancel := context.WithCancel(context.Background())
+	defer bcancel()

 	// create & start node
-	ns, err := newDefaultNode(config, log.TestingLogger())
+	ns, err := newDefaultNode(ctx, cfg, log.TestingLogger())
 	require.NoError(t, err)
-	require.NoError(t, ns.Start())
+	require.NoError(t, ns.Start(ctx))
+
+	t.Cleanup(func() {
+		if ns.IsRunning() {
+			bcancel()
+			ns.Wait()
+		}
+	})

 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)

 	// wait for the node to produce a block
-	blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock)
+	blocksSub, err := n.EventBus().SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
+		ClientID: "node_test",
+		Query:    types.EventQueryNewBlock,
+	})
 	require.NoError(t, err)
-	select {
-	case <-blocksSub.Out():
-	case <-blocksSub.Canceled():
-		t.Fatal("blocksSub was canceled")
-	case <-time.After(10 * time.Second):
-		t.Fatal("timed out waiting for the node to produce a block")
+	tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	defer cancel()
+	if _, err := blocksSub.Next(tctx); err != nil {
+		t.Fatalf("Waiting for event: %v", err)
 	}

 	// stop the node
 	go func() {
-		err = n.Stop()
-		require.NoError(t, err)
+		bcancel()
+		n.Wait()
 	}()

 	select {
 	case <-n.Quit():
-	case <-time.After(5 * time.Second):
-		pid := os.Getpid()
-		p, err := os.FindProcess(pid)
-		if err != nil {
-			panic(err)
-			return
+	case <-time.After(10 * time.Second):
+		if n.IsRunning() {
+			t.Fatal("timed out waiting for shutdown")
 		}
-		err = p.Signal(syscall.SIGABRT)
-		fmt.Println(err)
-		t.Fatal("timed out waiting for shutdown")

 	}
 }

-func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl {
+func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
 	t.Helper()
-	ns, err := newDefaultNode(conf, logger)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	ns, err := newDefaultNode(ctx, conf, logger)
 	require.NoError(t, err)

 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)

+	t.Cleanup(func() {
+		cancel()
+		if n.IsRunning() {
+			ns.Wait()
+		}
+	})

 	return n
 }

 func TestNodeDelayedStart(t *testing.T) {
-	config := cfg.ResetTestRoot("node_delayed_start_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg, err := config.ResetTestRoot("node_delayed_start_test")
+	require.NoError(t, err)
+
+	defer os.RemoveAll(cfg.RootDir)
 	now := tmtime.Now()

+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

 	// create & start node
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())
 	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)

-	require.NoError(t, n.Start())
-	defer n.Stop() //nolint:errcheck // ignore for tests
+	require.NoError(t, n.Start(ctx))

 	startTime := tmtime.Now()
 	assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime))
 }

 func TestNodeSetAppVersion(t *testing.T) {
-	config := cfg.ResetTestRoot("node_app_version_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg, err := config.ResetTestRoot("node_app_version_test")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)

+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

 	// create node
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())

 	// default config uses the kvstore app
-	var appVersion uint64 = kvstore.ProtocolVersion
+	appVersion := kvstore.ProtocolVersion

 	// check version is set in state
 	state, err := n.stateStore.Load()
@@ -129,9 +156,13 @@ func TestNodeSetAppVersion(t *testing.T) {
 func TestNodeSetPrivValTCP(t *testing.T) {
 	addr := "tcp://" + testFreeAddr(t)

-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = addr
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = addr

 	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
 	dialerEndpoint := privval.NewSignerDialerEndpoint(
@@ -142,41 +173,54 @@ func TestNodeSetPrivValTCP(t *testing.T) {

 	signerServer := privval.NewSignerServer(
 		dialerEndpoint,
-		config.ChainID(),
+		cfg.ChainID(),
 		types.NewMockPV(),
 	)

 	go func() {
-		err := signerServer.Start()
+		err := signerServer.Start(ctx)
 		if err != nil {
 			panic(err)
 		}
 	}()
-	defer signerServer.Stop() //nolint:errcheck // ignore for tests

-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())
 	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
 }

 // address without a protocol must result in error
 func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

 	addrNoPrefix := testFreeAddr(t)

-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = addrNoPrefix
+	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = addrNoPrefix
+	n, err := newDefaultNode(ctx, cfg, log.TestingLogger())

-	_, err := newDefaultNode(config, log.TestingLogger())
 	assert.Error(t, err)

+	if n != nil && n.IsRunning() {
+		cancel()
+		n.Wait()
+	}
 }

 func TestNodeSetPrivValIPC(t *testing.T) {
 	tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock"
 	defer os.Remove(tmpfile) // clean up

-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = "unix://" + tmpfile
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = "unix://" + tmpfile

 	dialer := privval.DialUnixFn(tmpfile)
 	dialerEndpoint := privval.NewSignerDialerEndpoint(
@@ -187,16 +231,16 @@ func TestNodeSetPrivValIPC(t *testing.T) {

 	pvsc := privval.NewSignerServer(
 		dialerEndpoint,
-		config.ChainID(),
+		cfg.ChainID(),
 		types.NewMockPV(),
 	)

 	go func() {
-		err := pvsc.Start()
+		err := pvsc.Start(ctx)
 		require.NoError(t, err)
 	}()
-	defer pvsc.Stop() //nolint:errcheck // ignore for tests
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())
 	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
 }

@@ -212,18 +256,22 @@ func testFreeAddr(t *testing.T) string {
 // create a proposal block using real and full
 // mempool and evidence pool and validate it.
 func TestCreateProposalBlock(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
-	err := proxyApp.Start()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg, err := config.ResetTestRoot("node_create_proposal")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)
+
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics())
+	err = proxyApp.Start(ctx)
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests

 	logger := log.TestingLogger()

 	const height int64 = 1
-	state, stateDB, privVals := state(1, height)
+	state, stateDB, privVals := state(t, 1, height)
 	stateStore := sm.NewStore(stateDB)
 	maxBytes := 16384
 	const partSize uint32 = 256
@@ -232,15 +280,12 @@ func TestCreateProposalBlock(t *testing.T) {
 	state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes
 	proposerAddr, _ := state.Validators.GetByIndex(0)

-	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+	mp := mempool.NewTxMempool(
+		logger.With("module", "mempool"),
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
-		mempoolv0.WithMetrics(mempool.NopMetrics()),
-		mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
-		mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
 	)
-	mp.SetLogger(logger)

 	// Make EvidencePool
 	evidenceDB := dbm.NewMemDB()
@@ -250,7 +295,7 @@ func TestCreateProposalBlock(t *testing.T) {

 	// fill the evidence pool with more evidence
 	// than can fit in a block
-	var currentBytes int64 = 0
+	var currentBytes int64
 	for currentBytes <= maxEvidenceBytes {
 		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain")
 		currentBytes += int64(len(ev.Bytes()))
@@ -267,7 +312,7 @@ func TestCreateProposalBlock(t *testing.T) {
 	txLength := 100
 	for i := 0; i <= maxBytes/txLength; i++ {
 		tx := tmrand.Bytes(txLength)
-		err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+		err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 		assert.NoError(t, err)
 	}

@@ -304,18 +349,22 @@ func TestCreateProposalBlock(t *testing.T) {
 }

 func TestMaxTxsProposalBlockSize(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
-	err := proxyApp.Start()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg, err := config.ResetTestRoot("node_create_proposal")
+	require.NoError(t, err)
+
+	defer os.RemoveAll(cfg.RootDir)
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics())
+	err = proxyApp.Start(ctx)
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests

 	logger := log.TestingLogger()

 	const height int64 = 1
-	state, stateDB, _ := state(1, height)
+	state, stateDB, _ := state(t, 1, height)
 	stateStore := sm.NewStore(stateDB)
 	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	const maxBytes int64 = 16384
@@ -324,20 +373,18 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 	proposerAddr, _ := state.Validators.GetByIndex(0)

 	// Make Mempool
-	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+
+	mp := mempool.NewTxMempool(
+		logger.With("module", "mempool"),
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
-		mempoolv0.WithMetrics(mempool.NopMetrics()),
-		mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
-		mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
 	)
-	mp.SetLogger(logger)

 	// fill the mempool with one txs just below the maximum size
 	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
 	tx := tmrand.Bytes(txLength - 4) // to account for the varint
-	err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 	assert.NoError(t, err)

 	blockExec := sm.NewBlockExecutor(
@@ -366,17 +413,20 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 }

 func TestMaxProposalBlockSize(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
-	err := proxyApp.Start()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg, err := config.ResetTestRoot("node_create_proposal")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.RootDir)
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics())
+	err = proxyApp.Start(ctx)
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests

 	logger := log.TestingLogger()

-	state, stateDB, _ := state(types.MaxVotesCount, int64(1))
+	state, stateDB, _ := state(t, types.MaxVotesCount, int64(1))
 	stateStore := sm.NewStore(stateDB)
 	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	const maxBytes int64 = 1024 * 1024 * 2
@@ -384,26 +434,23 @@ func TestMaxProposalBlockSize(t *testing.T) {
 	proposerAddr, _ := state.Validators.GetByIndex(0)

 	// Make Mempool
-	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+	mp := mempool.NewTxMempool(
+		logger.With("module", "mempool"),
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
-		mempoolv0.WithMetrics(mempool.NopMetrics()),
-		mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
-		mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
 	)
-	mp.SetLogger(logger)

 	// fill the mempool with one txs just below the maximum size
 	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount))
 	tx := tmrand.Bytes(txLength - 6) // to account for the varint
-	err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 	assert.NoError(t, err)
 	// now produce more txs than what a normal block can hold with 10 smaller txs
 	// At the end of the test, only the single big tx should be added
 	for i := 0; i < 10; i++ {
 		tx := tmrand.Bytes(10)
-		err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+		err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 		assert.NoError(t, err)
 	}

@@ -481,92 +528,126 @@ func TestMaxProposalBlockSize(t *testing.T) {
 }

 func TestNodeNewSeedNode(t *testing.T) {
-	config := cfg.ResetTestRoot("node_new_node_custom_reactors_test")
-	config.Mode = cfg.ModeSeed
-	defer os.RemoveAll(config.RootDir)
+	cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test")
+	require.NoError(t, err)
+	cfg.Mode = config.ModeSeed
+	defer os.RemoveAll(cfg.RootDir)

-	nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
 	require.NoError(t, err)

-	ns, err := makeSeedNode(config,
-		cfg.DefaultDBProvider,
+	ns, err := makeSeedNode(cfg,
+		config.DefaultDBProvider,
 		nodeKey,
-		defaultGenesisDocProviderFunc(config),
+		defaultGenesisDocProviderFunc(cfg),
 		log.TestingLogger(),
 	)
+	t.Cleanup(ns.Wait)

 	require.NoError(t, err)
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)

-	err = n.Start()
+	err = n.Start(ctx)
 	require.NoError(t, err)

 	assert.True(t, n.pexReactor.IsRunning())

+	cancel()
+	n.Wait()

 	assert.False(t, n.pexReactor.IsRunning())
 }

 func TestNodeSetEventSink(t *testing.T) {
-	config := cfg.ResetTestRoot("node_app_version_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg, err := config.ResetTestRoot("node_app_version_test")
+	require.NoError(t, err)
+
+	defer os.RemoveAll(cfg.RootDir)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

 	logger := log.TestingLogger()
-	setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink {
-		eventBus, err := createAndStartEventBus(logger)
+	setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
+		eventBus, err := createAndStartEventBus(ctx, logger)
 		require.NoError(t, err)
+		t.Cleanup(eventBus.Wait)
+		genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
+		require.NoError(t, err)

-		genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
+		indexService, eventSinks, err := createAndStartIndexerService(ctx, cfg,
+			config.DefaultDBProvider, eventBus, logger, genDoc.ChainID,
+			indexer.NopMetrics())
+		require.NoError(t, err)

-		indexService, eventSinks, err := createAndStartIndexerService(config,
-			cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
-		require.NoError(t, err)
-		t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
+		t.Cleanup(indexService.Wait)
 		return eventSinks
 	}
+	cleanup := func(ns service.Service) func() {
+		return func() {
+			n, ok := ns.(*nodeImpl)
+			if !ok {
+				return
+			}
+			if n == nil {
+				return
+			}
+			if !n.IsRunning() {
+				return
+			}
+			cancel()
+			n.Wait()
+		}
+	}

-	eventSinks := setupTest(t, config)
+	eventSinks := setupTest(t, cfg)
 	assert.Equal(t, 1, len(eventSinks))
 	assert.Equal(t, indexer.KV, eventSinks[0].Type())

-	config.TxIndex.Indexer = []string{"null"}
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{"null"}
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 1, len(eventSinks))
 	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

-	config.TxIndex.Indexer = []string{"null", "kv"}
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{"null", "kv"}
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 1, len(eventSinks))
 	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

-	config.TxIndex.Indexer = []string{"kvv"}
-	ns, err := newDefaultNode(config, logger)
+	cfg.TxIndex.Indexer = []string{"kvv"}
+	ns, err := newDefaultNode(ctx, cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("unsupported event sink type"), err)
+	assert.Contains(t, err.Error(), "unsupported event sink type")
+	t.Cleanup(cleanup(ns))

-	config.TxIndex.Indexer = []string{}
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{}
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 1, len(eventSinks))
 	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

-	config.TxIndex.Indexer = []string{"psql"}
-	ns, err = newDefaultNode(config, logger)
+	cfg.TxIndex.Indexer = []string{"psql"}
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
+	assert.Contains(t, err.Error(), "the psql connection settings cannot be empty")
+	t.Cleanup(cleanup(ns))

 	var psqlConn = "test"

-	config.TxIndex.Indexer = []string{"psql"}
-	config.TxIndex.PsqlConn = psqlConn
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{"psql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 1, len(eventSinks))
 	assert.Equal(t, indexer.PSQL, eventSinks[0].Type())

-	config.TxIndex.Indexer = []string{"psql", "kv"}
-	config.TxIndex.PsqlConn = psqlConn
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{"psql", "kv"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 2, len(eventSinks))
 	// we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks.
@@ -577,9 +658,9 @@ func TestNodeSetEventSink(t *testing.T) {
 		assert.Equal(t, indexer.KV, eventSinks[1].Type())
 	}

-	config.TxIndex.Indexer = []string{"kv", "psql"}
-	config.TxIndex.PsqlConn = psqlConn
-	eventSinks = setupTest(t, config)
+	cfg.TxIndex.Indexer = []string{"kv", "psql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)

 	assert.Equal(t, 2, len(eventSinks))
 	if eventSinks[0].Type() == indexer.KV {
@@ -590,20 +671,23 @@ func TestNodeSetEventSink(t *testing.T) {
 	}

 	var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
-	config.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
-	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, logger)
+	cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
+	t.Cleanup(cleanup(ns))

-	config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
-	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, logger)
+	cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
+	t.Cleanup(cleanup(ns))
 }

-func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
+func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
+	t.Helper()
 	privVals := make([]types.PrivValidator, nVals)
 	vals := make([]types.GenesisValidator, nVals)
 	for i := 0; i < nVals; i++ {
@@ -624,17 +708,15 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {

 	// save validators to db for 2 heights
 	stateDB := dbm.NewMemDB()
+	t.Cleanup(func() { require.NoError(t, stateDB.Close()) })
+
 	stateStore := sm.NewStore(stateDB)
-	if err := stateStore.Save(s); err != nil {
-		panic(err)
-	}
+	require.NoError(t, stateStore.Save(s))

 	for i := 1; i < int(height); i++ {
 		s.LastBlockHeight++
 		s.LastValidators = s.Validators.Copy()
-		if err := stateStore.Save(s); err != nil {
-			panic(err)
-		}
+		require.NoError(t, stateStore.Save(s))
 	}
 	return s, stateDB, privVals
 }
@@ -648,14 +730,15 @@ func loadStatefromGenesis(t *testing.T) sm.State {

 	stateDB := dbm.NewMemDB()
 	stateStore := sm.NewStore(stateDB)
-	config := cfg.ResetTestRoot("load_state_from_genesis")
+	cfg, err := config.ResetTestRoot("load_state_from_genesis")
+	require.NoError(t, err)

 	loadedState, err := stateStore.Load()
 	require.NoError(t, err)
 	require.True(t, loadedState.IsEmpty())

 	valSet, _ := factory.ValidatorSet(0, 10)
-	genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil)
+	genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)

 	state, err := loadStateFromDBOrGenesisDocProvider(
 		stateStore,