add fixes for flaky tests (#5146)

While working on tendermint, my colleague @jinmannwong fixed a few of the unit tests that we found to be flaky in our CI. We thought you might find this useful; see below for comments.
commit 375f0c819f (parent 940e68292c)
Author: n-hutton
Date:   2020-07-27 07:36:56 +01:00
Committed by: GitHub

4 changed files with 38 additions and 28 deletions


@@ -196,7 +196,10 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	maxBlockHeight := int64(148)
 
-	otherChain := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
+	// Other chain needs a different validator set
+	otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
+	otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
+
 	defer func() {
 		err := otherChain.reactor.Stop()
 		require.Error(t, err)
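
Comment on the hunk above: previously both reactors were built from the same genDoc/privVals, so blocks from the "other" chain could validate against the chain under test and the bad-block path was presumably only hit intermittently. Building the second chain from its own randGenesisDoc guarantees a different validator set, so its blocks are always rejected and the peer is stopped deterministically.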


@@ -67,14 +67,16 @@ func TestThrottle(test *testing.T) {
 	time.Sleep(longwait)
 	assert.Equal(2, c.Count())
 
-	// send 12, over 2 delay sections, adds 3
+	// send 12, over 2 delay sections, adds 3 or more. It
+	// is possible for more to be added if the overhead
+	// in executing the loop is large
 	short := time.Duration(ms/5) * time.Millisecond
 	for i := 0; i < 13; i++ {
 		t.Set()
 		time.Sleep(short)
 	}
 	time.Sleep(longwait)
-	assert.Equal(5, c.Count())
+	assert.LessOrEqual(5, c.Count())
 
 	close(t.Ch)
 }
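
Comment: the general shape of this fix, as a self-contained sketch (hypothetical test, not code from this repo; only the testify assertions are the same): when loop overhead can only push a count in one known direction, assert a bound in that direction instead of exact equality.

	func TestPollCount(test *testing.T) {
		assert := assert.New(test)

		count := 0
		start := time.Now()
		for time.Since(start) < 100*time.Millisecond {
			count++
			time.Sleep(20 * time.Millisecond)
		}
		// Flaky: assert.Equal(5, count); loop overhead means fewer
		// iterations can fit in the window on a loaded CI machine.
		// Robust: assert only the direction the drift cannot cross.
		// Every iteration sleeps at least 20ms, so at most 5 fit.
		assert.LessOrEqual(count, 5)
	}

Note the argument order: LessOrEqual(e1, e2) asserts e1 <= e2, so the patched line in the hunk above checks that c.Count() is at least 5.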


@@ -189,7 +189,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
 	}()
 	<-serverGotPing
 
-	pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond
+	pongTimerExpired := mconn.config.PongTimeout + 200*time.Millisecond
 	select {
 	case msgBytes := <-receivedCh:
 		t.Fatalf("Expected error, but got %v", msgBytes)


@@ -138,13 +138,16 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
 }
 
 func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
+	pv := ed25519.GenPrivKey()
+	id := PubKeyToID(pv.PubKey())
 	mt := newMultiplexTransport(
-		emptyNodeInfo(),
+		testNodeInfo(
+			id, "transport",
+		),
 		NodeKey{
-			PrivKey: ed25519.GenPrivKey(),
+			PrivKey: pv,
 		},
 	)
-	id := mt.nodeKey.ID()
 
 	MultiplexTransportMaxIncomingConnections(0)(mt)
@@ -152,32 +155,34 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	const maxIncomingConns = 2
+	MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt)
 	if err := mt.Listen(*addr); err != nil {
 		t.Fatal(err)
 	}
 
-	errc := make(chan error)
-
-	go func() {
-		addr := NewNetAddress(id, mt.listener.Addr())
-
-		_, err := addr.Dial()
-		if err != nil {
-			errc <- err
-			return
-		}
-
-		close(errc)
-	}()
-
-	if err := <-errc; err != nil {
-		t.Errorf("connection failed: %v", err)
-	}
-
-	_, err = mt.Accept(peerConfig{})
-	if err == nil || !strings.Contains(err.Error(), "connection reset by peer") {
-		t.Errorf("expected connection reset by peer error, got %v", err)
-	}
+	laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
+
+	// Connect more peers than max
+	for i := 0; i <= maxIncomingConns; i++ {
+		errc := make(chan error)
+		go testDialer(*laddr, errc)
+
+		err = <-errc
+		if i < maxIncomingConns {
+			if err != nil {
+				t.Errorf("dialer connection failed: %v", err)
+			}
+			_, err = mt.Accept(peerConfig{})
+			if err != nil {
+				t.Errorf("connection failed: %v", err)
+			}
+		} else if err == nil || !strings.Contains(err.Error(), "i/o timeout") {
+			// mt actually blocks forever on trying to accept a new peer into a full channel so
+			// expect the dialer to encounter a timeout error. Calling mt.Accept will block until
+			// mt is closed.
+			t.Errorf("expected i/o timeout error, got %v", err)
+		}
+	}
 }
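
Comment: the rewrite also removes a race. Instead of firing dialers concurrently and expecting a timing-dependent "connection reset by peer", it drives maxIncomingConns+1 dialers one at a time: each of the first two must connect and be accepted, and only the extra one must fail, with an i/o timeout, because a full transport never accepts it. testDialer is a helper that already exists alongside these tests; roughly (body assumed, not copied from the repo) it looks like:

	// Dial the transport under test with a fresh key and report the
	// result on errc; nil means the connection was established.
	func testDialer(dialAddr NetAddress, errc chan error) {
		pv := ed25519.GenPrivKey()
		dialer := newMultiplexTransport(
			testNodeInfo(PubKeyToID(pv.PubKey()), "dialer"),
			NodeKey{PrivKey: pv},
		)

		if _, err := dialer.Dial(dialAddr, peerConfig{}); err != nil {
			errc <- err
			return
		}
		errc <- nil
	}
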
@@ -294,13 +299,13 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
 			errc <- fmt.Errorf("fast peer timed out")
 		}
 
-		sc, err := upgradeSecretConn(c, 20*time.Millisecond, ed25519.GenPrivKey())
+		sc, err := upgradeSecretConn(c, 200*time.Millisecond, ed25519.GenPrivKey())
 		if err != nil {
 			errc <- err
 			return
 		}
 
-		_, err = handshake(sc, 20*time.Millisecond,
+		_, err = handshake(sc, 200*time.Millisecond,
 			testNodeInfo(
 				PubKeyToID(ed25519.GenPrivKey().PubKey()),
 				"slow_peer",