This pull request merges the changes implementing proposer-based timestamps into `master`. The work was primarily done in the `wb/proposer-based-timestamps` branch, with changes merged into that branch during development. This pull request is an amalgamation of the changes made in that development branch. All of the changes placed into that branch have been cleanly rebased on top of the latest `master`. The changes compile, and the tests pass insofar as our tests in general pass.

### Note To Reviewers

These changes were extensively reviewed during development, so there is not much new here. To make effective use of reviewers' time, I would recommend against a complete audit of the changes presented; instead, look for mistakes that may have been introduced while rebasing. I gave the complete change set a first pass for such issues, but additional eyes would be very appreciated.

In sum, this change set does the following:

closes #6942
merges in #6849
package node

import (
	"context"
	"errors"
	"fmt"
	"math"
	"net"
	"os"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/internal/eventbus"
	"github.com/tendermint/tendermint/internal/evidence"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/internal/pubsub"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/internal/state/indexer"
	"github.com/tendermint/tendermint/internal/store"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/libs/service"
	tmtime "github.com/tendermint/tendermint/libs/time"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/types"
)

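// TestNodeStartStop starts a default node, waits for it to report a new
// block event, then cancels the base context and verifies the node stops.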
func TestNodeStartStop(t *testing.T) {
	cfg, err := config.ResetTestRoot("node_node_test")
	require.NoError(t, err)

	defer os.RemoveAll(cfg.RootDir)

	ctx, bcancel := context.WithCancel(context.Background())
	defer bcancel()

	logger := log.NewNopLogger()
	// create & start node
	ns, err := newDefaultNode(ctx, cfg, logger)
	require.NoError(t, err)

	n, ok := ns.(*nodeImpl)
	require.True(t, ok)
	t.Cleanup(func() {
		if n.IsRunning() {
			bcancel()
			n.Wait()
		}
	})

	require.NoError(t, n.Start(ctx))
	// wait for the node to produce a block
	tctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()

	blocksSub, err := n.EventBus().SubscribeWithArgs(tctx, pubsub.SubscribeArgs{
		ClientID: "node_test",
		Query:    types.EventQueryNewBlock,
	})
	require.NoError(t, err)
	_, err = blocksSub.Next(tctx)
	require.NoError(t, err, "waiting for event")

	cancel()  // stop the subscription context
	bcancel() // stop the base context
	n.Wait()

	require.False(t, n.IsRunning(), "node must shut down")
}

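// getTestNode constructs a default node for testing and registers cleanup
// handlers that stop the node and check for leaked goroutines.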
func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
	t.Helper()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	ns, err := newDefaultNode(ctx, conf, logger)
	require.NoError(t, err)

	n, ok := ns.(*nodeImpl)
	require.True(t, ok)

	t.Cleanup(func() {
		cancel()
		if n.IsRunning() {
			ns.Wait()
		}
	})
	t.Cleanup(leaktest.CheckTimeout(t, time.Second))

	return n
}

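// TestNodeDelayedStart verifies that a node whose genesis time lies in the
// future does not finish starting until that time has passed.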
func TestNodeDelayedStart(t *testing.T) {
	cfg, err := config.ResetTestRoot("node_delayed_start_test")
	require.NoError(t, err)

	defer os.RemoveAll(cfg.RootDir)
	now := tmtime.Now()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewNopLogger()

	// create & start node
	n := getTestNode(ctx, t, cfg, logger)
	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)

	require.NoError(t, n.Start(ctx))

	startTime := tmtime.Now()
	assert.True(t, startTime.After(n.GenesisDoc().GenesisTime))
}

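// TestNodeSetAppVersion checks that the application's protocol version is
// recorded both in the node's state and in its node info.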
func TestNodeSetAppVersion(t *testing.T) {
	cfg, err := config.ResetTestRoot("node_app_version_test")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewNopLogger()

	// create node
	n := getTestNode(ctx, t, cfg, logger)

	// default config uses the kvstore app
	appVersion := kvstore.ProtocolVersion

	// check version is set in state
	state, err := n.stateStore.Load()
	require.NoError(t, err)
	assert.Equal(t, state.Version.Consensus.App, appVersion)

	// check version is set in node info
	assert.Equal(t, n.nodeInfo.ProtocolVersion.App, appVersion)
}

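// TestNodeSetPrivValTCP verifies that a node configured with a TCP
// priv-validator listen address connects to the remote signer and wraps it
// in a retrying signer client.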
func TestNodeSetPrivValTCP(t *testing.T) {
	addr := "tcp://" + testFreeAddr(t)

	t.Cleanup(leaktest.Check(t))
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewNopLogger()

	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)
	cfg.PrivValidator.ListenAddr = addr

	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
	dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer)
	privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint)

	signerServer := privval.NewSignerServer(
		dialerEndpoint,
		cfg.ChainID(),
		types.NewMockPV(),
	)

	go func() {
		err := signerServer.Start(ctx)
		require.NoError(t, err)
	}()
	defer signerServer.Stop() //nolint:errcheck // ignore for tests

	genDoc, err := defaultGenesisDocProviderFunc(cfg)()
	require.NoError(t, err)

	pval, err := createPrivval(ctx, logger, cfg, genDoc, nil)
	require.NoError(t, err)

	assert.IsType(t, &privval.RetrySignerClient{}, pval)
}

// an address without a protocol must result in an error
func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	addrNoPrefix := testFreeAddr(t)

	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)
	cfg.PrivValidator.ListenAddr = addrNoPrefix

	logger := log.NewNopLogger()

	n, err := newDefaultNode(ctx, cfg, logger)

	assert.Error(t, err)

	if n != nil && n.IsRunning() {
		cancel()
		n.Wait()
	}
}

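// TestNodeSetPrivValIPC verifies that a node configured with a unix-socket
// priv-validator listen address connects to the remote signer and wraps it
// in a retrying signer client.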
func TestNodeSetPrivValIPC(t *testing.T) {
	tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock"
	defer os.Remove(tmpfile) // clean up

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := config.ResetTestRoot("node_priv_val_tcp_test")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)
	cfg.PrivValidator.ListenAddr = "unix://" + tmpfile

	logger := log.NewNopLogger()

	dialer := privval.DialUnixFn(tmpfile)
	dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer)

	privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint)

	pvsc := privval.NewSignerServer(
		dialerEndpoint,
		cfg.ChainID(),
		types.NewMockPV(),
	)

	go func() {
		err := pvsc.Start(ctx)
		require.NoError(t, err)
	}()
	defer pvsc.Stop() //nolint:errcheck // ignore for tests
	genDoc, err := defaultGenesisDocProviderFunc(cfg)()
	require.NoError(t, err)

	pval, err := createPrivval(ctx, logger, cfg, genDoc, nil)
	require.NoError(t, err)

	assert.IsType(t, &privval.RetrySignerClient{}, pval)
}

// testFreeAddr claims a free port so we don't block on listener being ready.
func testFreeAddr(t *testing.T) string {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	defer ln.Close()

	return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)
}

// create a proposal block using real and full
// mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := config.ResetTestRoot("node_create_proposal")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)

	logger := log.NewNopLogger()

	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
	proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
	err = proxyApp.Start(ctx)
	require.NoError(t, err)

	const height int64 = 1
	state, stateDB, privVals := state(t, 1, height)
	stateStore := sm.NewStore(stateDB)
	maxBytes := 16384
	const partSize uint32 = 256
	maxEvidenceBytes := int64(maxBytes / 2)
	state.ConsensusParams.Block.MaxBytes = int64(maxBytes)
	state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes
	proposerAddr, _ := state.Validators.GetByIndex(0)

	mp := mempool.NewTxMempool(
		logger.With("module", "mempool"),
		cfg.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
	)

	// Make EvidencePool
	evidenceDB := dbm.NewMemDB()
	blockStore := store.NewBlockStore(dbm.NewMemDB())
	evidencePool, err := evidence.NewPool(logger, evidenceDB, stateStore, blockStore)
	require.NoError(t, err)

	// fill the evidence pool with more evidence
	// than can fit in a block
	var currentBytes int64
	for currentBytes <= maxEvidenceBytes {
		ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time.Now(), privVals[0], "test-chain")
		require.NoError(t, err)
		currentBytes += int64(len(ev.Bytes()))
		evidencePool.ReportConflictingVotes(ev.VoteA, ev.VoteB)
	}

	evList, size := evidencePool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes)
	require.Less(t, size, state.ConsensusParams.Evidence.MaxBytes+1)
	evData := &types.EvidenceData{Evidence: evList}
	require.EqualValues(t, size, evData.ByteSize())

	// fill the mempool with more txs
	// than can fit in a block
	txLength := 100
	for i := 0; i <= maxBytes/txLength; i++ {
		tx := tmrand.Bytes(txLength)
		err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
		assert.NoError(t, err)
	}

	blockExec := sm.NewBlockExecutor(
		stateStore,
		logger,
		proxyApp.Consensus(),
		mp,
		evidencePool,
		blockStore,
	)

	commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
	block, _, err := blockExec.CreateProposalBlock(
		height,
		state, commit,
		proposerAddr,
	)
	require.NoError(t, err)

	// check that the part set does not exceed the maximum block size
	partSet, err := block.MakePartSet(partSize)
	require.NoError(t, err)
	assert.Less(t, partSet.ByteSize(), int64(maxBytes))

	partSetFromHeader := types.NewPartSetFromHeader(partSet.Header())
	for partSetFromHeader.Count() < partSetFromHeader.Total() {
		added, err := partSetFromHeader.AddPart(partSet.GetPart(int(partSetFromHeader.Count())))
		require.NoError(t, err)
		require.True(t, added)
	}
	assert.EqualValues(t, partSetFromHeader.ByteSize(), partSet.ByteSize())

	err = blockExec.ValidateBlock(state, block)
	assert.NoError(t, err)
}

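// TestMaxTxsProposalBlockSize checks that a proposal block holding a single
// maximally sized transaction stays within the consensus MaxBytes limit.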
func TestMaxTxsProposalBlockSize(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := config.ResetTestRoot("node_create_proposal")
	require.NoError(t, err)

	defer os.RemoveAll(cfg.RootDir)

	logger := log.NewNopLogger()

	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
	proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
	err = proxyApp.Start(ctx)
	require.NoError(t, err)

	const height int64 = 1
	state, stateDB, _ := state(t, 1, height)
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(dbm.NewMemDB())
	const maxBytes int64 = 16384
	const partSize uint32 = 256
	state.ConsensusParams.Block.MaxBytes = maxBytes
	proposerAddr, _ := state.Validators.GetByIndex(0)

	// Make Mempool
	mp := mempool.NewTxMempool(
		logger.With("module", "mempool"),
		cfg.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
	)

	// fill the mempool with one tx just below the maximum size
	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
	tx := tmrand.Bytes(txLength - 4) // to account for the varint
	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
	assert.NoError(t, err)

	blockExec := sm.NewBlockExecutor(
		stateStore,
		logger,
		proxyApp.Consensus(),
		mp,
		sm.EmptyEvidencePool{},
		blockStore,
	)

	commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
	block, _, err := blockExec.CreateProposalBlock(
		height,
		state, commit,
		proposerAddr,
	)
	require.NoError(t, err)

	pb, err := block.ToProto()
	require.NoError(t, err)
	assert.Less(t, int64(pb.Size()), maxBytes)

	// check that the part set does not exceed the maximum block size
	partSet, err := block.MakePartSet(partSize)
	require.NoError(t, err)
	assert.EqualValues(t, partSet.ByteSize(), int64(pb.Size()))
}

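// TestMaxProposalBlockSize constructs the largest possible header and commit
// and checks that the resulting proposal block exactly hits MaxBytes.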
func TestMaxProposalBlockSize(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := config.ResetTestRoot("node_create_proposal")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)

	logger := log.NewNopLogger()

	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
	proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics())
	err = proxyApp.Start(ctx)
	require.NoError(t, err)

	state, stateDB, _ := state(t, types.MaxVotesCount, int64(1))
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(dbm.NewMemDB())
	const maxBytes int64 = 1024 * 1024 * 2
	state.ConsensusParams.Block.MaxBytes = maxBytes
	proposerAddr, _ := state.Validators.GetByIndex(0)

	// Make Mempool
	mp := mempool.NewTxMempool(
		logger.With("module", "mempool"),
		cfg.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
	)

	// fill the mempool with one tx just below the maximum size
	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount))
	tx := tmrand.Bytes(txLength - 6) // to account for the varint
	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
	assert.NoError(t, err)
	// now add 10 smaller txs on top of the big one.
	// At the end of the test, only the single big tx should have been added.
	for i := 0; i < 10; i++ {
		tx := tmrand.Bytes(10)
		err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
		assert.NoError(t, err)
	}

	blockExec := sm.NewBlockExecutor(
		stateStore,
		logger,
		proxyApp.Consensus(),
		mp,
		sm.EmptyEvidencePool{},
		blockStore,
	)

	blockID := types.BlockID{
		Hash: tmhash.Sum([]byte("blockID_hash")),
		PartSetHeader: types.PartSetHeader{
			Total: math.MaxInt32,
			Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
		},
	}

	timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC)
	// change state in order to produce the largest accepted header
	state.LastBlockID = blockID
	state.LastBlockHeight = math.MaxInt64 - 1
	state.LastBlockTime = timestamp
	state.LastResultsHash = tmhash.Sum([]byte("last_results_hash"))
	state.AppHash = tmhash.Sum([]byte("app_hash"))
	state.Version.Consensus.Block = math.MaxInt64
	state.Version.Consensus.App = math.MaxInt64
	maxChainID := ""
	for i := 0; i < types.MaxChainIDLen; i++ {
		maxChainID += "𠜎"
	}
	state.ChainID = maxChainID

	cs := types.CommitSig{
		BlockIDFlag:      types.BlockIDFlagNil,
		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
		Timestamp:        timestamp,
		Signature:        crypto.CRandBytes(types.MaxSignatureSize),
	}

	commit := &types.Commit{
		Height:  math.MaxInt64,
		Round:   math.MaxInt32,
		BlockID: blockID,
	}

	// add maximum amount of signatures to a single commit
	for i := 0; i < types.MaxVotesCount; i++ {
		commit.Signatures = append(commit.Signatures, cs)
	}

	block, partSet, err := blockExec.CreateProposalBlock(
		math.MaxInt64,
		state, commit,
		proposerAddr,
	)
	require.NoError(t, err)

	// this ensures that the header is at max size
	block.Header.Time = timestamp

	pb, err := block.ToProto()
	require.NoError(t, err)

	// require that the header and commit be the max possible size
	require.Equal(t, int64(pb.Header.Size()), types.MaxHeaderBytes)
	require.Equal(t, int64(pb.LastCommit.Size()), types.MaxCommitBytes(types.MaxVotesCount))
	// make sure that the block is exactly the max possible size
	assert.Equal(t, int64(pb.Size()), maxBytes)
	// because of the proto overhead we expect the part set bytes to be equal or
	// less than the pb block size
	assert.LessOrEqual(t, partSet.ByteSize(), int64(pb.Size()))
}

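// TestNodeNewSeedNode verifies that a node in seed mode starts its PEX
// reactor and stops it again on shutdown.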
func TestNodeNewSeedNode(t *testing.T) {
	cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test")
	require.NoError(t, err)
	cfg.Mode = config.ModeSeed
	defer os.RemoveAll(cfg.RootDir)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
	require.NoError(t, err)

	logger := log.NewNopLogger()

	ns, err := makeSeedNode(ctx,
		cfg,
		config.DefaultDBProvider,
		nodeKey,
		defaultGenesisDocProviderFunc(cfg),
		logger,
	)
	require.NoError(t, err)
	t.Cleanup(ns.Wait)

	n, ok := ns.(*seedNodeImpl)
	require.True(t, ok)

	err = n.Start(ctx)
	require.NoError(t, err)
	assert.True(t, n.pexReactor.IsRunning())

	cancel()
	n.Wait()

	assert.False(t, n.pexReactor.IsRunning())
}

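// TestNodeSetEventSink checks event-sink selection from the tx-index config,
// including rejection of unsupported and duplicated sink types.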
func TestNodeSetEventSink(t *testing.T) {
	cfg, err := config.ResetTestRoot("node_app_version_test")
	require.NoError(t, err)

	defer os.RemoveAll(cfg.RootDir)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewNopLogger()

	setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
		eventBus := eventbus.NewDefault(logger.With("module", "events"))
		require.NoError(t, eventBus.Start(ctx))

		t.Cleanup(eventBus.Wait)
		genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
		require.NoError(t, err)

		indexService, eventSinks, err := createAndStartIndexerService(ctx, cfg,
			config.DefaultDBProvider, eventBus, logger, genDoc.ChainID,
			indexer.NopMetrics())
		require.NoError(t, err)
		t.Cleanup(indexService.Wait)
		return eventSinks
	}
	cleanup := func(ns service.Service) func() {
		return func() {
			n, ok := ns.(*nodeImpl)
			if !ok {
				return
			}
			if n == nil {
				return
			}
			if !n.IsRunning() {
				return
			}
			cancel()
			n.Wait()
		}
	}

	eventSinks := setupTest(t, cfg)
	assert.Equal(t, 1, len(eventSinks))
	assert.Equal(t, indexer.KV, eventSinks[0].Type())

	cfg.TxIndex.Indexer = []string{"null"}
	eventSinks = setupTest(t, cfg)

	assert.Equal(t, 1, len(eventSinks))
	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

	cfg.TxIndex.Indexer = []string{"null", "kv"}
	eventSinks = setupTest(t, cfg)

	assert.Equal(t, 1, len(eventSinks))
	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

	cfg.TxIndex.Indexer = []string{"kvv"}
	ns, err := newDefaultNode(ctx, cfg, logger)
	assert.Nil(t, ns)
	assert.Contains(t, err.Error(), "unsupported event sink type")
	t.Cleanup(cleanup(ns))

	cfg.TxIndex.Indexer = []string{}
	eventSinks = setupTest(t, cfg)

	assert.Equal(t, 1, len(eventSinks))
	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

	cfg.TxIndex.Indexer = []string{"psql"}
	ns, err = newDefaultNode(ctx, cfg, logger)
	assert.Nil(t, ns)
	assert.Contains(t, err.Error(), "the psql connection settings cannot be empty")
	t.Cleanup(cleanup(ns))

	// N.B. We can't create a PSQL event sink without starting a postgres
	// instance for it to talk to. The indexer service tests exercise that case.

	var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
	cfg.TxIndex.Indexer = []string{"null", "kv", "Kv"}
	ns, err = newDefaultNode(ctx, cfg, logger)
	require.Error(t, err)
	assert.Contains(t, err.Error(), e.Error())
	t.Cleanup(cleanup(ns))

	cfg.TxIndex.Indexer = []string{"Null", "kV", "kv", "nUlL"}
	ns, err = newDefaultNode(ctx, cfg, logger)
	require.Error(t, err)
	assert.Contains(t, err.Error(), e.Error())
	t.Cleanup(cleanup(ns))
}

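// state builds a genesis state with nVals single-power validators, persists
// it to a fresh in-memory database through the given height, and returns the
// state, the database, and the validators' private keys.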
func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
	t.Helper()
	privVals := make([]types.PrivValidator, nVals)
	vals := make([]types.GenesisValidator, nVals)
	for i := 0; i < nVals; i++ {
		privVal := types.NewMockPV()
		privVals[i] = privVal
		vals[i] = types.GenesisValidator{
			Address: privVal.PrivKey.PubKey().Address(),
			PubKey:  privVal.PrivKey.PubKey(),
			Power:   1000,
			Name:    fmt.Sprintf("test%d", i),
		}
	}
	s, _ := sm.MakeGenesisState(&types.GenesisDoc{
		ChainID:    "test-chain",
		Validators: vals,
		AppHash:    nil,
	})

	// save validators to db at each height
	stateDB := dbm.NewMemDB()
	t.Cleanup(func() { require.NoError(t, stateDB.Close()) })

	stateStore := sm.NewStore(stateDB)
	require.NoError(t, stateStore.Save(s))

	for i := 1; i < int(height); i++ {
		s.LastBlockHeight++
		s.LastValidators = s.Validators.Copy()
		require.NoError(t, stateStore.Save(s))
	}
	return s, stateDB, privVals
}

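// TestLoadStateFromGenesis ensures that state can be derived from the
// genesis document when the state store is empty.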
func TestLoadStateFromGenesis(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = loadStatefromGenesis(ctx, t)
}

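// loadStatefromGenesis verifies that an empty state store falls back to a
// freshly generated genesis document and returns the resulting state.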
func loadStatefromGenesis(ctx context.Context, t *testing.T) sm.State {
	t.Helper()

	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB)
	cfg, err := config.ResetTestRoot("load_state_from_genesis")
	require.NoError(t, err)

	loadedState, err := stateStore.Load()
	require.NoError(t, err)
	require.True(t, loadedState.IsEmpty())

	valSet, _ := factory.ValidatorSet(ctx, t, 0, 10)
	genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)

	state, err := loadStateFromDBOrGenesisDocProvider(
		stateStore,
		genDoc,
	)
	require.NoError(t, err)
	require.NotNil(t, state)

	return state
}