Compare commits

...

14 Commits

Author SHA1 Message Date
Alessio Treglia bf64ac63a5 use t.Cleanup() in node/ 2020-11-30 17:07:23 +00:00
Alessio Treglia ced64e7648 replace ResetTestRoot with SetupTestConfiguration in mempool and state 2020-11-30 16:54:11 +00:00
Alessio Treglia 8b094eb6ff sort imports 2020-11-30 16:39:19 +00:00
Alessio Treglia a8a68b8ce0 Merge branch 'master' into alessio/new-setup-test-configuration 2020-11-30 16:24:47 +00:00
Alessio Treglia 02a2ce6b10 WIP 2020-11-29 12:07:15 +00:00
Alessio Treglia 1b665fe822 replace defer with t.Cleanup calls 2020-11-29 10:38:55 +00:00
Alessio Treglia 8de77321c4 replace defer with t.Cleanup calls 2020-11-29 10:31:05 +00:00
Alessio Treglia cec4876dfc use t.Cleanup() for leakage test as well 2020-11-29 09:25:23 +00:00
Alessio Treglia adc05084fd use t.Cleanup() instead of defer to close/stop once test is done 2020-11-29 09:09:32 +00:00
Alessio Treglia 69ef808786 Use latest go1.15 2020-11-29 08:37:31 +00:00
Alessio Treglia 7af19bc1aa compile with go1.15 2020-11-29 08:04:20 +00:00
Alessio Treglia 7be41a2959 Merge branch 'master' into alessio/use-go-testing-t-utils 2020-11-28 21:06:58 +00:00
Alessio Treglia 3b4124bb40 use go 1.15 2020-11-09 18:41:34 +00:00
Alessio Treglia f2876f4ece use testing.T.{Cleanup,TempDir}() in tests 2020-11-09 18:16:45 +00:00
14 changed files with 213 additions and 191 deletions
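The pattern applied across the fourteen files: cfg.ResetTestRoot plus a manual defer os.RemoveAll(config.RootDir) becomes cfg.SetupTestConfiguration(t), which builds the test root under t.TempDir() so the testing package removes it automatically, and deferred Stop() calls become t.Cleanup registrations. A minimal sketch of a converted test, modelled on TestNodeStartStop below; the test name is illustrative and the import paths assume this repository at this revision:

package node

import (
	"testing"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
)

func TestNodeStartStopSketch(t *testing.T) {
	// Before: config := cfg.ResetTestRoot("node_node_test")
	//         defer os.RemoveAll(config.RootDir)
	// After: the root is created under t.TempDir() and deleted by the
	// testing package once the test and its cleanups have finished.
	config := cfg.SetupTestConfiguration(t)

	n, err := DefaultNewNode(config, log.TestingLogger())
	if err != nil {
		t.Fatal(err)
	}
	if err := n.Start(); err != nil {
		t.Fatal(err)
	}
	// Before: defer n.Stop() //nolint:errcheck
	// After: the cleanup runs after the test body returns.
	t.Cleanup(func() { _ = n.Stop() })
}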

View File

@@ -2,7 +2,6 @@ package v0
import (
"fmt"
"os"
"sort"
"testing"
"time"
@@ -132,8 +131,7 @@ func newBlockchainReactor(
}
func TestNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
config = cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(65)
@@ -149,14 +147,14 @@ func TestNoBlockResponse(t *testing.T) {
}, p2p.Connect2Switches)
defer func() {
t.Cleanup(func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
require.NoError(t, err)
err = r.app.Stop()
require.NoError(t, err)
}
}()
})
tests := []struct {
height int64
@@ -194,8 +192,7 @@ func TestNoBlockResponse(t *testing.T) {
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
config = cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(148)
@@ -204,12 +201,12 @@ func TestBadBlockStopsPeer(t *testing.T) {
otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
defer func() {
t.Cleanup(func() {
err := otherChain.reactor.Stop()
require.Error(t, err)
err = otherChain.app.Stop()
require.NoError(t, err)
}()
})
reactorPairs := make([]BlockchainReactorPair, 4)
@@ -224,7 +221,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
}, p2p.Connect2Switches)
defer func() {
t.Cleanup(func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
require.NoError(t, err)
@@ -232,7 +229,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
err = r.app.Stop()
require.NoError(t, err)
}
}()
})
for {
time.Sleep(1 * time.Second)

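Unlike a deferred closure, a function registered with t.Cleanup runs after the test (including any subtests started with t.Run) has finished, and multiple cleanups run in last-in, first-out order, so the converted stop-all-reactors blocks keep the ordering the stacked defers had. A self-contained illustration, not taken from the diff; the test name is illustrative:

package v0

import "testing"

// Cleanups run after the test body returns, in LIFO order, mirroring the
// behaviour of the deferred closures they replace in these tests.
func TestCleanupOrderSketch(t *testing.T) {
	t.Cleanup(func() { t.Log("second: registered first") })
	t.Cleanup(func() { t.Log("first: registered last") })
	t.Log("test body runs before either cleanup")
}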
View File

@@ -2,7 +2,6 @@ package v1
import (
"fmt"
"os"
"sort"
"sync"
"testing"
@@ -180,9 +179,7 @@ func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced
}
func TestFastSyncNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_new_reactor_test")
defer os.RemoveAll(config.RootDir)
config = cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(65)
@@ -203,12 +200,12 @@ func TestFastSyncNoBlockResponse(t *testing.T) {
}, p2p.Connect2Switches)
defer func() {
t.Cleanup(func() {
for _, r := range reactorPairs {
_ = r.bcR.Stop()
_ = r.conR.Stop()
}
}()
})
tests := []struct {
height int64
@@ -251,15 +248,14 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) {
numNodes := 4
maxBlockHeight := int64(148)
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
config = cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(1, false, 30)
otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
defer func() {
t.Cleanup(func() {
_ = otherChain.bcR.Stop()
_ = otherChain.conR.Stop()
}()
})
reactorPairs := make([]BlockchainReactorPair, numNodes)
logger := make([]log.Logger, numNodes)
@@ -284,12 +280,12 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) {
}, p2p.Connect2Switches)
defer func() {
t.Cleanup(func() {
for _, r := range reactorPairs {
_ = r.bcR.Stop()
_ = r.conR.Stop()
}
}()
})
outerFor:
for {

View File

@@ -3,7 +3,6 @@ package v2
import (
"fmt"
"net"
"os"
"sort"
"sync"
"testing"
@@ -351,8 +350,7 @@ func TestReactorHelperMode(t *testing.T) {
channelID = byte(0x40)
)
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
params := testReactorParams{
@@ -427,8 +425,7 @@ func TestReactorHelperMode(t *testing.T) {
}
func TestReactorSetSwitchNil(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
reactor := newTestReactor(testReactorParams{

View File

@@ -458,6 +458,58 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
namespace = "{{ .Instrumentation.Namespace }}"
`
type T interface {
TempDir() string
Fatal(...interface{})
Fatalf(string, ...interface{})
}
func SetupTestConfiguration(t T) *Config {
return SetupTestConfigurationWithChainID(t, "")
}
func SetupTestConfigurationWithChainID(t T, chainID string) *Config {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir := t.TempDir()
// ensure config and data subdirs are created
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
t.Fatal(err)
}
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
t.Fatal(err)
}
baseConfig := DefaultBaseConfig()
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)
// Write default config file if missing.
if !tmos.FileExists(configFilePath) {
writeDefaultConfigFile(configFilePath)
}
if !tmos.FileExists(genesisFilePath) {
if chainID == "" {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
mustWriteFileHelper(t, genesisFilePath, []byte(testGenesis), 0644)
}
// we always overwrite the priv val
mustWriteFileHelper(t, privKeyFilePath, []byte(testPrivValidatorKey), 0644)
mustWriteFileHelper(t, privStateFilePath, []byte(testPrivValidatorState), 0644)
config := TestConfig().SetRoot(rootDir)
return config
}
func mustWriteFileHelper(t T, filePath string, contents []byte, mode os.FileMode) {
if err := ioutil.WriteFile(filePath, contents, mode); err != nil {
t.Fatalf("failed to write file: %v", err)
}
}
/****** these are for test settings ***********/
func ResetTestRoot(testName string) *Config {

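The new helper is parameterised over the small T interface above rather than *testing.T directly, and since Go 1.15 both *testing.T and *testing.B provide TempDir, Fatal and Fatalf, so benchmarks can call it too (as BenchmarkLoadValidators does later in this compare). A brief usage sketch, assuming an external config_test package; the test names and chain ID are illustrative:

package config_test

import (
	"testing"

	cfg "github.com/tendermint/tendermint/config"
)

func TestSetupTestConfigurationSketch(t *testing.T) {
	c := cfg.SetupTestConfiguration(t)
	if c.RootDir == "" {
		t.Fatal("expected the test root to be set")
	}
}

func BenchmarkSetupTestConfigurationSketch(b *testing.B) {
	// *testing.B satisfies the T interface as well: the TempDir-backed root
	// is removed automatically when the benchmark ends.
	c := cfg.SetupTestConfigurationWithChainID(b, "test-chain-sketch")
	for i := 0; i < b.N; i++ {
		_ = c.GenesisFile()
	}
}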
View File

@@ -41,6 +41,25 @@ func TestEnsureRoot(t *testing.T) {
ensureFiles(t, tmpDir, "data")
}
func TestSetupConfiguration(t *testing.T) {
require := require.New(t)
// create root dir
cfg := SetupTestConfiguration(t)
rootDir := cfg.RootDir
// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.Nil(err)
if !checkConfig(string(data)) {
t.Fatalf("config file missing some information")
}
// TODO: make sure the cfg returned and testconfig are the same!
baseConfig := DefaultBaseConfig()
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
}
func TestEnsureTestRoot(t *testing.T) {
require := require.New(t)

View File

@@ -11,8 +11,7 @@ import (
func BenchmarkReap(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(b, cc)
size := 10000
for i := 0; i < size; i++ {
@@ -31,8 +30,7 @@ func BenchmarkReap(b *testing.B) {
func BenchmarkCheckTx(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(b, cc)
for i := 0; i < b.N; i++ {
tx := make([]byte, 8)

View File

@@ -39,8 +39,7 @@ func TestCacheRemove(t *testing.T) {
func TestCacheAfterUpdate(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
// reAddIndices & txsInCache can have elements > numTxsToCreate
// also assumes max index is 255 for convenience

View File

@@ -8,7 +8,6 @@ import (
"fmt"
"io/ioutil"
mrand "math/rand"
"os"
"path/filepath"
"testing"
"time"
@@ -30,24 +29,19 @@ import (
"github.com/tendermint/tendermint/types"
)
// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
func newMempoolWithApp(t testing.TB, cc proxy.ClientCreator) *CListMempool {
return newMempoolWithAppAndConfig(t, cc, cfg.SetupTestConfiguration(t))
}
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
func newMempoolWithAppAndConfig(t testing.TB, cc proxy.ClientCreator, config *cfg.Config) *CListMempool {
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
err := appConnMem.Start()
if err != nil {
panic(err)
if err := appConnMem.Start(); err != nil {
t.Fatal(err)
}
mempool := NewCListMempool(config.Mempool, appConnMem, 0)
mempool.SetLogger(log.TestingLogger())
return mempool, func() { os.RemoveAll(config.RootDir) }
return mempool
}
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@@ -94,8 +88,7 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
func TestReapMaxBytesMaxGas(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
// Ensure gas calculation behaves as expected
checkTxs(t, mempool, 1, UnknownPeerID)
@@ -143,8 +136,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
func TestMempoolFilters(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
emptyTxArr := []types.Tx{[]byte{}}
nopPreFilter := func(tx types.Tx) error { return nil }
@@ -182,8 +174,7 @@ func TestMempoolFilters(t *testing.T) {
func TestMempoolUpdate(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
// 1. Adds valid txs to the cache
{
@@ -220,8 +211,7 @@ func TestMempoolUpdate(t *testing.T) {
func TestTxsAvailable(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
mempool.EnableTxsAvailable()
timeoutMS := 500
@@ -265,8 +255,7 @@ func TestSerialReap(t *testing.T) {
app := counter.NewApplication(true)
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
@@ -387,8 +376,7 @@ func TestMempoolCloseWAL(t *testing.T) {
wcfg.Mempool.RootDir = rootDir
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
defer cleanup()
mempool := newMempoolWithAppAndConfig(t, cc, wcfg)
mempool.height = 10
err = mempool.InitWAL()
require.NoError(t, err)
@@ -424,11 +412,8 @@ func TestMempoolCloseWAL(t *testing.T) {
func TestMempool_CheckTxChecksTxSize(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempl, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempl := newMempoolWithApp(t, cc)
maxTxSize := mempl.config.MaxTxBytes
testCases := []struct {
len int
err bool
@@ -466,10 +451,9 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
func TestMempoolTxsBytes(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
config := cfg.ResetTestRoot("mempool_test")
config := cfg.SetupTestConfiguration(t)
config.Mempool.MaxTxsBytes = 10
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
defer cleanup()
mempool := newMempoolWithAppAndConfig(t, cc, config)
// 1. zero by default
assert.EqualValues(t, 0, mempool.TxsBytes())
@@ -503,10 +487,9 @@ func TestMempoolTxsBytes(t *testing.T) {
// 6. zero after tx is rechecked and removed due to not being valid anymore
app2 := counter.NewApplication(true)
cc = proxy.NewLocalClientCreator(app2)
mempool, cleanup = newMempoolWithApp(cc)
defer cleanup()
mempool = newMempoolWithApp(t, cc)
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
err = mempool.CheckTx(txBytes, nil, TxInfo{})
@@ -559,9 +542,8 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
t.Error(err)
}
})
config := cfg.ResetTestRoot("mempool_test")
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
defer cleanup()
config := cfg.SetupTestConfiguration(t)
mempool := newMempoolWithAppAndConfig(t, cc, config)
// generate small number of txs
nTxs := 10

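The benchmarks and tests above can share newMempoolWithApp because its parameter type is testing.TB, the standard-library interface satisfied by both *testing.T and *testing.B; failures surface through t.Fatal instead of panic, and no cleanup function is returned because the config root sits under TempDir. A sketch of the same helper style, not part of the diff; the helper and test names are illustrative:

package mempool

import (
	"testing"

	cfg "github.com/tendermint/tendermint/config"
)

// newTestConfigSketch accepts testing.TB so tests and benchmarks can share
// one constructor, and reports failures via tb.Fatal rather than panicking.
func newTestConfigSketch(tb testing.TB) *cfg.Config {
	tb.Helper()
	c := cfg.SetupTestConfiguration(tb)
	if c == nil {
		tb.Fatal("nil test configuration")
	}
	return c
}

func TestHelperSketch(t *testing.T)      { _ = newTestConfigSketch(t) }
func BenchmarkHelperSketch(b *testing.B) { _ = newTestConfigSketch(b) }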
View File

@@ -47,7 +47,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) {
// replace Connect2Switches (full mesh) with a func, which connects first
// reactor to others and nothing else, this test should also pass with >2 reactors.
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -69,7 +69,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) {
func TestReactorConcurrency(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -130,7 +130,7 @@ func TestReactorConcurrency(t *testing.T) {
func TestReactorNoBroadcastToSender(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -154,7 +154,7 @@ func TestReactor_MaxBatchBytes(t *testing.T) {
config.Mempool.MaxBatchBytes = 1024
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -196,7 +196,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -221,7 +221,7 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
config := cfg.TestConfig()
const N = 2
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
// stop reactors
for _, r := range reactors {
@@ -271,7 +271,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
func TestDontExhaustMaxActiveIDs(t *testing.T) {
config := cfg.TestConfig()
const N = 1
reactors := makeAndConnectReactors(config, N)
reactors := makeAndConnectReactors(t, config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
@@ -302,15 +302,13 @@ func mempoolLogger() log.Logger {
}
// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
func makeAndConnectReactors(t *testing.T, config *cfg.Config, n int) []*Reactor {
reactors := make([]*Reactor, n)
logger := mempoolLogger()
for i := 0; i < n; i++ {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool := newMempoolWithApp(t, cc)
reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
}

View File

@@ -32,8 +32,7 @@ import (
)
func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@@ -94,8 +93,7 @@ func TestSplitAndTrimEmpty(t *testing.T) {
}
func TestNodeDelayedStart(t *testing.T) {
config := cfg.ResetTestRoot("node_delayed_start_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
now := tmtime.Now()
// create & start node
@@ -112,8 +110,7 @@ func TestNodeDelayedStart(t *testing.T) {
}
func TestNodeSetAppVersion(t *testing.T) {
config := cfg.ResetTestRoot("node_app_version_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
@@ -134,8 +131,7 @@ func TestNodeSetAppVersion(t *testing.T) {
func TestNodeSetPrivValTCP(t *testing.T) {
addr := "tcp://" + testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
config.BaseConfig.PrivValidatorListenAddr = addr
dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
@@ -157,7 +153,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
panic(err)
}
}()
defer signerServer.Stop() //nolint:errcheck // ignore for tests
t.Cleanup(func() { _ = signerServer.Stop() })
n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
@@ -168,8 +164,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
addrNoPrefix := testFreeAddr(t)
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix
_, err := DefaultNewNode(config, log.TestingLogger())
@@ -178,10 +173,9 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
func TestNodeSetPrivValIPC(t *testing.T) {
tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock"
defer os.Remove(tmpfile) // clean up
t.Cleanup(func() { _ = os.Remove(tmpfile) })
config := cfg.ResetTestRoot("node_priv_val_tcp_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile
dialer := privval.DialUnixFn(tmpfile)
@@ -201,7 +195,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
err := pvsc.Start()
require.NoError(t, err)
}()
defer pvsc.Stop() //nolint:errcheck // ignore for tests
t.Cleanup(func() { _ = pvsc.Stop() })
n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
@@ -210,9 +204,10 @@ func TestNodeSetPrivValIPC(t *testing.T) {
// testFreeAddr claims a free port so we don't block on listener being ready.
func testFreeAddr(t *testing.T) string {
t.Helper()
ln, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
defer ln.Close()
defer func() { _ = ln.Close() }()
return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)
}
@@ -220,13 +215,12 @@ func testFreeAddr(t *testing.T) string {
// create a proposal block using real and full
// mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.Nil(t, err)
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
t.Cleanup(func() { _ = proxyApp.Stop() })
logger := log.TestingLogger()
@@ -315,13 +309,12 @@ func TestCreateProposalBlock(t *testing.T) {
}
func TestMaxProposalBlockSize(t *testing.T) {
config := cfg.ResetTestRoot("node_create_proposal")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.Nil(t, err)
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
t.Cleanup(func() { _ = proxyApp.Stop() })
logger := log.TestingLogger()
@@ -376,12 +369,9 @@ func TestMaxProposalBlockSize(t *testing.T) {
}
func TestNodeNewNodeCustomReactors(t *testing.T) {
config := cfg.ResetTestRoot("node_new_node_custom_reactors_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
cr := p2pmock.NewReactor()
customBlockchainReactor := p2pmock.NewReactor()
nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
require.NoError(t, err)
pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
@@ -401,7 +391,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
err = n.Start()
require.NoError(t, err)
defer n.Stop() //nolint:errcheck // ignore for tests
t.Cleanup(func() { _ = n.Stop() })
assert.True(t, cr.IsRunning())
assert.Equal(t, cr, n.Switch().Reactor("FOO"))

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"math"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -25,8 +24,8 @@ import (
)
// setupTestCase does setup common to all test cases.
func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
config := cfg.ResetTestRoot("state_")
func setupTestCase(t *testing.T) (dbm.DB, sm.State) {
config := cfg.SetupTestConfiguration(t)
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
stateStore := sm.NewStore(stateDB)
@@ -36,15 +35,12 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
err = stateStore.Save(state)
require.NoError(t, err)
tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
return tearDown, stateDB, state
return stateDB, state
}
// TestStateCopy tests the correct copying behaviour of State.
func TestStateCopy(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
assert := assert.New(t)
stateCopy := state.Copy()
@@ -74,8 +70,7 @@ func TestMakeGenesisStateNilValidators(t *testing.T) {
// TestStateSaveLoad tests saving and loading State from a db.
func TestStateSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
assert := assert.New(t)
@@ -93,8 +88,7 @@ func TestStateSaveLoad(t *testing.T) {
// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses.
func TestABCIResponsesSaveLoad1(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
assert := assert.New(t)
@@ -124,8 +118,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
// TestResultsSaveLoad tests saving and loading ABCI results.
func TestABCIResponsesSaveLoad2(t *testing.T) {
tearDown, stateDB, _ := setupTestCase(t)
defer tearDown(t)
stateDB, _ := setupTestCase(t)
assert := assert.New(t)
stateStore := sm.NewStore(stateDB)
@@ -212,8 +205,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
// TestValidatorSimpleSaveLoad tests saving and loading validators.
func TestValidatorSimpleSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
assert := assert.New(t)
statestore := sm.NewStore(stateDB)
@@ -247,8 +239,7 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
// TestValidatorChangesSaveLoad tests saving and loading a validator set with changes.
func TestOneValidatorChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
// Change vals at these heights.
@@ -304,7 +295,6 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
}
func TestProposerFrequency(t *testing.T) {
// some explicit test cases
testCases := []struct {
powers []int64
@@ -431,8 +421,7 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) {
// TestProposerPriorityDoesNotGetResetToZero assert that we preserve accum when calling updateState
// see https://github.com/tendermint/tendermint/issues/2718
func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
val1VotingPower := int64(10)
val1PubKey := ed25519.GenPrivKey().PubKey()
val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower}
@@ -545,8 +534,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
// IncrementProposerPriority change.
// Additionally, make sure that same power validators alternate if both
// have the same voting power (and the 2nd was added later).
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
val1VotingPower := int64(10)
val1PubKey := ed25519.GenPrivKey().PubKey()
val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower}
@@ -716,8 +704,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
}
func TestLargeGenesisValidator(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
genesisVotingPower := types.MaxTotalVotingPower / 1000
genesisPubKey := ed25519.GenPrivKey().PubKey()
@@ -899,8 +886,7 @@ func TestLargeGenesisValidator(t *testing.T) {
func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
const valSetSize = 2
tearDown, stateDB, state := setupTestCase(t)
t.Cleanup(func() { tearDown(t) })
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
@@ -924,8 +910,7 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
// changes.
func TestManyValidatorChangesSaveLoad(t *testing.T) {
const valSetSize = 7
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
@@ -972,9 +957,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
}
func TestStateMakeBlock(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
proposerAddress := state.Validators.GetProposer().Address
stateVersion := state.Version.Consensus
block := makeBlock(state, 2)
@@ -987,11 +970,8 @@ func TestStateMakeBlock(t *testing.T) {
// TestConsensusParamsChangesSaveLoad tests saving and loading consensus params
// with changes.
func TestConsensusParamsChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateDB, state := setupTestCase(t)
stateStore := sm.NewStore(stateDB)
// Change vals at these heights.
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20}
N := len(changeHeights)
@@ -1052,9 +1032,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
}
func TestStateProto(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
_, state := setupTestCase(t)
tc := []struct {
testName string
state *sm.State

View File

@@ -2,7 +2,6 @@ package state_test
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -49,8 +48,7 @@ func TestStoreLoadValidators(t *testing.T) {
func BenchmarkLoadValidators(b *testing.B) {
const valSetSize = 100
config := cfg.ResetTestRoot("state_")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(b)
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
require.NoError(b, err)

View File

@@ -3,7 +3,6 @@ package store
import (
"bytes"
"fmt"
"os"
"runtime/debug"
"strings"
"testing"
@@ -26,10 +25,6 @@ import (
"github.com/tendermint/tendermint/version"
)
// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()
// make a Commit with a single vote containing just the height and a timestamp
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
commitSigs := []types.CommitSig{{
@@ -54,8 +49,8 @@ func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Bl
return block
}
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
func makeStateAndBlockStore(t *testing.T) (sm.State, *BlockStore) {
config := cfg.SetupTestConfiguration(t)
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
@@ -65,7 +60,22 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
return state, NewBlockStore(blockDB)
}
func makeTestStateAndBlockStore(t *testing.T, logger log.Logger) (sm.State, *BlockStore) {
config := cfg.SetupTestConfiguration(t)
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
return state, NewBlockStore(blockDB)
}
func TestLoadBlockStoreState(t *testing.T) {
@@ -133,33 +143,43 @@ func freshBlockStore() (*BlockStore, dbm.DB) {
return NewBlockStore(db), db
}
var (
type testFixture struct {
state sm.State
block *types.Block
partSet *types.PartSet
part1 *types.Part
part2 *types.Part
seenCommit1 *types.Commit
)
store *BlockStore
func TestMain(m *testing.M) {
var cleanup cleanupFunc
state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
block = makeBlock(1, state, new(types.Commit))
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = makeTestCommit(10, tmtime.Now())
code := m.Run()
cleanup()
os.Exit(code)
t *testing.T
}
func newTestFixture(t *testing.T) *testFixture {
state, store := makeStateAndBlockStore(t)
block := makeBlock(1, state, new(types.Commit))
partSet := block.MakePartSet(2)
part1 := partSet.GetPart(0)
part2 := partSet.GetPart(1)
seenCommit1 := makeTestCommit(10, tmtime.Now())
return &testFixture{
state: state,
block: block,
partSet: partSet,
part1: part1,
part2: part2,
seenCommit1: seenCommit1,
store: store,
t: t,
}
}
// TODO: This test should be simplified ...
func TestBlockStoreSaveLoadBlock(t *testing.T) {
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
fixture := newTestFixture(t)
state, bs := makeTestStateAndBlockStore(t, log.NewTMLogger(new(bytes.Buffer)))
require.Equal(t, bs.Base(), int64(0), "initially the base should be zero")
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
@@ -175,13 +195,13 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
block := makeBlock(bs.Height()+1, state, new(types.Commit))
validPartSet := block.MakePartSet(2)
seenCommit := makeTestCommit(10, tmtime.Now())
bs.SaveBlock(block, partSet, seenCommit)
bs.SaveBlock(block, fixture.partSet, seenCommit)
require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed")
require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed")
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
_, err := uncontiguousPartSet.AddPart(part2)
_, err := uncontiguousPartSet.AddPart(fixture.part2)
require.Error(t, err)
header1 := types.Header{
@@ -211,7 +231,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
},
{
@@ -242,7 +262,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry
wantPanic: "error reading block commit",
},
@@ -250,7 +270,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
wantPanic: "unmarshal to tmproto.BlockMeta",
corruptBlockInDB: true, // Corrupt the DB's block entry
},
@@ -258,7 +278,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
// Expecting no error and we want a nil back
eraseSeenCommitInDB: true,
@@ -267,7 +287,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
corruptSeenCommitInDB: true,
wantPanic: "error reading block seen commit",
@@ -276,7 +296,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
{
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
seenCommit: fixture.seenCommit1,
// Expecting no error and we want a nil back
eraseCommitInDB: true,
@@ -367,8 +387,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
func TestLoadBaseMeta(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
stateStore := sm.NewStore(dbm.NewMemDB())
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
@@ -390,6 +409,7 @@ func TestLoadBaseMeta(t *testing.T) {
}
func TestLoadBlockPart(t *testing.T) {
fixture := newTestFixture(t)
bs, db := freshBlockStore()
height, index := int64(10), 1
loadPart := func() (interface{}, error) {
@@ -411,20 +431,19 @@ func TestLoadBlockPart(t *testing.T) {
require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed")
// 3. A good block serialized and saved to the DB should be retrievable
pb1, err := part1.ToProto()
pb1, err := fixture.part1.ToProto()
require.NoError(t, err)
err = db.Set(calcBlockPartKey(height, index), mustEncode(pb1))
require.NoError(t, err)
gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved block should return a proper block")
require.Equal(t, gotPart.(*types.Part), part1,
require.Equal(t, gotPart.(*types.Part), fixture.part1,
"expecting successful retrieval of previously saved block")
}
func TestPruneBlocks(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
config := cfg.SetupTestConfiguration(t)
stateStore := sm.NewStore(dbm.NewMemDB())
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
@@ -549,8 +568,7 @@ func TestLoadBlockMeta(t *testing.T) {
}
func TestBlockFetchAtHeight(t *testing.T) {
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
state, bs := makeTestStateAndBlockStore(t, log.NewTMLogger(new(bytes.Buffer)))
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
block := makeBlock(bs.Height()+1, state, new(types.Commit))

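The package-level fixtures that TestMain used to seed are gone; each test that needs them now builds its own via newTestFixture(t), so tests no longer share mutable state and the temporary config root is torn down per test. A short sketch of how a test consumes the fixture; the test name and assertions are illustrative:

package store

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFixtureUsageSketch(t *testing.T) {
	fixture := newTestFixture(t)

	// Each test builds its own state, block, parts and store instead of
	// reading package-level vars initialised in TestMain.
	require.EqualValues(t, 1, fixture.block.Height)
	require.Equal(t, fixture.part1, fixture.partSet.GetPart(0))
	require.NotNil(t, fixture.store)
}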
View File

@@ -153,7 +153,7 @@ func makeAddrs() (string, string, string) {
// getConfig returns a config for test cases
func getConfig(t *testing.T) *cfg.Config {
c := cfg.ResetTestRoot(t.Name())
c := cfg.SetupTestConfiguration(t)
// and we use random ports to run in parallel
tm, rpc, grpc := makeAddrs()