Improve handling of -short flag in tests (#9075)

As a small developer quality-of-life improvement, I found many individual unit tests that take longer than about a second to complete, and set them to be skipped when run under `go test -short`.

On my machine, the wall timings for tests (with `go test -count=1 ./...` and optionally `-short` and `-race`) are roughly:

- Long tests, no race detector: about 1m42s
- Short tests, no race detector: about 17s
- Long tests, race detector enabled: about 2m1s
- Short tests, race detector enabled: about 28s

This PR is split into many commits each touching a single package, with commit messages detailing the approximate timing change per package.
This commit is contained in:
Mark Rushakoff
2022-07-29 09:41:54 -04:00
committed by GitHub
parent 48147e1fb9
commit d433ebe68d
24 changed files with 217 additions and 2 deletions

View File

@@ -33,6 +33,10 @@ import (
// Byzantine node sends two different prevotes (nil and blockID) to the same
// validator.
func TestByzantinePrevoteEquivocation(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
// empirically, this test either passes in <1s or hits some
// kind of deadlock and hits the larger timeout. This timeout
// can be extended a bunch if needed, but it's good to avoid

View File

@@ -779,6 +779,10 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
}
func TestReactorVotingPowerChange(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
@@ -885,6 +889,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
}
func TestReactorValidatorSetChanges(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()

View File

@@ -118,6 +118,10 @@ func sendTxs(ctx context.Context, t *testing.T, cs *State) {
// TestWALCrash uses crashing WAL to test we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
testCases := []struct {
name string
initFn func(dbm.DB, *State, context.Context)

View File

@@ -132,6 +132,10 @@ func convertTex(in []testTx) types.Txs {
}
func TestTxMempool_TxsAvailable(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -540,6 +544,10 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) {
}
func TestTxMempool_ConcurrentTxs(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -315,6 +315,10 @@ func TestMConnectionMultiplePings(t *testing.T) {
}
func TestMConnectionPingPongs(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
// check that we are not leaking any go-routines
t.Cleanup(leaktest.CheckTimeout(t, 10*time.Second))
@@ -558,6 +562,10 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
}
func TestMConnectionTrySend(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
server, client := net.Pipe()
t.Cleanup(closeAll(t, client, server))
ctx, cancel := context.WithCancel(context.Background())
@@ -606,6 +614,10 @@ func TestConnVectors(t *testing.T) {
}
func TestMConnectionChannelOverflow(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
chOnErr := make(chan struct{})
chOnRcv := make(chan struct{})

View File

@@ -296,6 +296,10 @@ func TestPeerManager_DialNext(t *testing.T) {
}
func TestPeerManager_DialNext_Retry(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -41,6 +41,10 @@ func echoReactor(ctx context.Context, channel p2p.Channel) {
}
func TestRouter_Network(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -162,6 +166,10 @@ func TestRouter_Channel_Basic(t *testing.T) {
// Channel tests are hairy to mock, so we use an in-memory network instead.
func TestRouter_Channel_SendReceive(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -224,6 +232,10 @@ func TestRouter_Channel_SendReceive(t *testing.T) {
}
func TestRouter_Channel_Broadcast(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Cleanup(leaktest.Check(t))
ctx, cancel := context.WithCancel(context.Background())
@@ -255,6 +267,10 @@ func TestRouter_Channel_Broadcast(t *testing.T) {
}
func TestRouter_Channel_Wrapper(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Cleanup(leaktest.Check(t))
ctx, cancel := context.WithCancel(context.Background())
@@ -443,6 +459,11 @@ func TestRouter_AcceptPeers(t *testing.T) {
}
func TestRouter_AcceptPeers_Errors(t *testing.T) {
if testing.Short() {
// Each subtest takes more than one second due to the time.Sleep call,
// so just skip from the parent test in short mode.
t.Skip("skipping test in short mode")
}
for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} {
t.Run(err.Error(), func(t *testing.T) {
@@ -480,9 +501,7 @@ func TestRouter_AcceptPeers_Errors(t *testing.T) {
router.Stop()
mockTransport.AssertExpectations(t)
})
}
}
@@ -811,6 +830,10 @@ func TestRouter_EvictPeers(t *testing.T) {
}
func TestRouter_ChannelCompatability(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Cleanup(leaktest.Check(t))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -59,6 +59,10 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) {
}
func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -126,6 +126,10 @@ func TestBlockQueueWithFailures(t *testing.T) {
// Test that when all the blocks are retrieved that the queue still holds on to
// its workers and in the event of failure can still fetch the failed block
func TestBlockQueueBlocks(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2)
@@ -176,6 +180,10 @@ loop:
}
func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)

View File

@@ -197,6 +197,10 @@ func setup(
}
func TestReactor_Sync(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
@@ -618,6 +622,10 @@ func TestReactor_StateProviderP2P(t *testing.T) {
}
func TestReactor_Backfill(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -626,6 +634,10 @@ func TestReactor_Backfill(t *testing.T) {
for _, failureRate := range failureRates {
failureRate := failureRate
t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
if testing.Short() && failureRate > 0 {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()

View File

@@ -22,6 +22,10 @@ import (
)
func TestSyncer_SyncAny(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()