consensus: explicit test timeout (#7585)

Sam Kleinman
2022-01-13 16:11:51 -05:00
committed by GitHub
parent b7c19a5cd4
commit 7e8fa4ed85
3 changed files with 17 additions and 17 deletions


@@ -7,6 +7,7 @@ import (
"path"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -30,7 +31,11 @@ import (
 // Byzantine node sends two different prevotes (nil and blockID) to the same
 // validator.
 func TestByzantinePrevoteEquivocation(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	// Empirically, this test either passes in <1s or hits some kind of
+	// deadlock and hits the larger timeout. This timeout can be extended
+	// a bunch if needed, but it's good to avoid falling back to a much
+	// coarser timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 	config := configSetup(t)
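
For context, this hunk bounds the whole test with a deadline instead of an open-ended cancelable context. A minimal, self-contained sketch of the pattern follows; the test name and the goroutine body are hypothetical and not part of this commit:

package demo

import (
	"context"
	"testing"
	"time"
)

// TestBoundedByDeadline only illustrates the shape of the change above:
// the entire test runs under a 10s deadline, so a deadlock surfaces as a
// context error rather than tripping go test's much coarser -timeout
// (10 minutes by default).
func TestBoundedByDeadline(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	done := make(chan struct{})
	go func() {
		// Stand-in for the real work the test drives.
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()

	select {
	case <-done:
		// Finished well inside the deadline.
	case <-ctx.Done():
		t.Fatalf("test timed out: %v", ctx.Err())
	}
}

Any helper that accepts ctx observes the same deadline, so waits deeper in the test fail at the same 10s bound.
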
@@ -275,12 +280,11 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	require.NoError(t, err)
 	for idx, ev := range evidenceFromEachValidator {
-		if assert.NotNil(t, ev, idx) {
-			ev, ok := ev.(*types.DuplicateVoteEvidence)
-			assert.True(t, ok)
-			assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
-			assert.Equal(t, prevoteHeight, ev.Height())
-		}
+		require.NotNil(t, ev, idx)
+		ev, ok := ev.(*types.DuplicateVoteEvidence)
+		require.True(t, ok)
+		assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
+		assert.Equal(t, prevoteHeight, ev.Height())
 	}
 }
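
This hunk flattens the if assert.NotNil wrapper into straight-line require calls. The distinction matters: assert records a failure and keeps going, while require stops the test immediately, so the type assertion and field accesses below it can never run against a nil value. A standalone sketch of that behavior, with made-up types standing in for the evidence interfaces:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// evidence and duplicateVote are invented stand-ins; only the
// require/assert behavior is the point of this sketch.
type evidence interface{ Height() int64 }

type duplicateVote struct{ height int64 }

func (d *duplicateVote) Height() int64 { return d.height }

func TestRequireStopsTheTest(t *testing.T) {
	items := []evidence{&duplicateVote{height: 10}, &duplicateVote{height: 10}}

	for idx, ev := range items {
		// require.NotNil fails the test and returns immediately on a nil
		// entry, so the lines below never dereference nil. The old
		// assert-based version only skipped the inner block and kept going.
		require.NotNil(t, ev, idx)

		dv, ok := ev.(*duplicateVote)
		require.True(t, ok)

		// assert is still fine for pure value checks: a mismatch should be
		// reported, but it cannot cause a panic further down.
		assert.Equal(t, int64(10), dv.Height())
	}
}
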


@@ -73,17 +73,17 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
 		blockStore,
 	)
-	bytes, _ := os.ReadFile(cs.config.WalFile())
-	t.Logf("====== WAL: \n\r%X\n", bytes)
-	err = cs.Start(ctx)
+	bytes, err := os.ReadFile(cs.config.WalFile())
+	require.NoError(t, err)
+	require.NotNil(t, bytes)
+	require.NoError(t, cs.Start(ctx))
 	defer func() {
 		if err := cs.Stop(); err != nil {
 			t.Error(err)
 		}
 	}()
 	t.Cleanup(cs.Wait)
 	// This is just a signal that we haven't halted; it's not something contained
 	// in the WAL itself. Assuming the consensus state is running, replay of any
 	// WAL, including the empty one, should eventually be followed by a new
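
The rewritten setup reads the WAL file and fails immediately on any error instead of discarding it, wraps Start in require.NoError, and registers teardown with both defer and t.Cleanup. A reduced sketch of that shape, with a hypothetical fakeService standing in for the consensus state and an invented WAL path:

package demo

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

// fakeService stands in for the consensus state; only the setup and
// teardown shape of the rewritten test is being sketched here.
type fakeService struct{}

func (s *fakeService) Start(ctx context.Context) error { return nil }
func (s *fakeService) Stop() error                     { return nil }
func (s *fakeService) Wait()                           {}

func TestStartAfterReadingWAL(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	walFile := filepath.Join(t.TempDir(), "wal")
	require.NoError(t, os.WriteFile(walFile, []byte("entries"), 0o600))

	// Fail fast if the WAL cannot be read; the old code discarded this
	// error and only dumped the bytes with t.Logf.
	bytes, err := os.ReadFile(walFile)
	require.NoError(t, err)
	require.NotNil(t, bytes)

	svc := &fakeService{}
	require.NoError(t, svc.Start(ctx))
	defer func() {
		if err := svc.Stop(); err != nil {
			t.Error(err)
		}
	}()
	// Cleanups run after the test function (and its defers) return, so
	// Wait observes a service that has already been asked to stop.
	t.Cleanup(svc.Wait)
}
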
@@ -157,8 +157,6 @@ func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusRepla
 	i := 1
 LOOP:
 	for {
-		t.Logf("====== LOOP %d\n", i)
-
 		// create consensus state from a clean slate
 		logger := log.NewNopLogger()
 		blockDB := dbm.NewMemDB()
@@ -204,8 +202,6 @@ LOOP:
 		select {
 		case err := <-walPanicked:
 			t.Logf("WAL panicked: %v", err)
 			// make sure we can make blocks after a crash
 			startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore)


@@ -136,7 +136,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	logger := log.NewTestingLogger(t)
+	logger := log.NewNopLogger()
 	walBody, err := WALWithNBlocks(ctx, t, logger, 6)
 	if err != nil {
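
The last change swaps the t.Log-backed logger for a no-op logger, presumably to cut log noise and to avoid writes through t.Log from goroutines that outlive the test, which the testing package turns into a panic. A generic illustration of the two behaviors; the Logger interface and both implementations below are made up, while the real constructors are the log.NewTestingLogger and log.NewNopLogger calls shown in the diff:

package demo

import "testing"

// Logger is a minimal stand-in for the logging interface these tests use.
type Logger interface {
	Info(msg string, keyvals ...interface{})
}

// nopLogger discards everything, so writes from goroutines that outlive
// the test are harmless.
type nopLogger struct{}

func (nopLogger) Info(string, ...interface{}) {}

// testingLogger forwards to t.Log; a write after the test has returned
// makes the testing package panic with
// "Log in goroutine after ... has completed".
type testingLogger struct{ t *testing.T }

func (l testingLogger) Info(msg string, keyvals ...interface{}) {
	l.t.Log(append([]interface{}{msg}, keyvals...)...)
}

func TestWithNopLogger(t *testing.T) {
	var logger Logger = nopLogger{}
	logger.Info("building WAL", "blocks", 6) // silently dropped
	_ = Logger(testingLogger{t: t})          // shown only for contrast
}
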