lint: errcheck (#5091)
## Description

Adds more error checks to tests. A third PR will tackle the non-test cases.
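The recurring fix in this diff replaces fire-and-forget teardown (`defer x.Stop()`) with `t.Cleanup` callbacks that surface the error. A minimal sketch of the before/after idiom, using a hypothetical `svc` type (not a type from the tendermint codebase; the real services are the switches, stores, and servers touched below):

```go
package example

import "testing"

// svc stands in for any service with a Stop() error method;
// it is hypothetical, not a type from the tendermint codebase.
type svc struct{}

func (s *svc) Stop() error { return nil }

func TestTeardown(t *testing.T) {
	s := &svc{}

	// Before: errcheck flags this because the error from Stop is dropped.
	//   defer s.Stop()

	// After: t.Cleanup runs when the test finishes and reports the error.
	t.Cleanup(func() {
		if err := s.Stop(); err != nil {
			t.Error(err)
		}
	})
}
```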
@@ -55,7 +55,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
 - [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker)
 - [crypto] \#4940 All keys have become `[]byte` instead of `[<size>]byte`. The byte method no longer returns the marshaled value but just the `[]byte` form of the data.
 - [crypto] \#4988 Removal of key type multisig
-  - The key has been moved to the Cosmos-SDK (https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go)
+  - The key has been moved to the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go)
 - [crypto] \#4989 Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`.
   - `merkle.Proof` has been renamed to `ProofOps`.
   - Protobuf messages `Proof` & `ProofOp` have been moved to `proto/crypto/merkle`
@@ -295,80 +295,119 @@ func (cli *socketClient) FlushSync() error {

 func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
     reqres := cli.queueRequest(types.ToRequestEcho(msg))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetEcho(), cli.Error()
 }

 func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
     reqres := cli.queueRequest(types.ToRequestInfo(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetInfo(), cli.Error()
 }

 func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
     reqres := cli.queueRequest(types.ToRequestSetOption(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetSetOption(), cli.Error()
 }

 func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
     reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetDeliverTx(), cli.Error()
 }

 func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
     reqres := cli.queueRequest(types.ToRequestCheckTx(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetCheckTx(), cli.Error()
 }

 func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
     reqres := cli.queueRequest(types.ToRequestQuery(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetQuery(), cli.Error()
 }

 func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
     reqres := cli.queueRequest(types.ToRequestCommit())
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetCommit(), cli.Error()
 }

 func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
     reqres := cli.queueRequest(types.ToRequestInitChain(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetInitChain(), cli.Error()
 }

 func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
     reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetBeginBlock(), cli.Error()
 }

 func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
     reqres := cli.queueRequest(types.ToRequestEndBlock(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetEndBlock(), cli.Error()
 }

 func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
     reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetListSnapshots(), cli.Error()
 }

 func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
     reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetOfferSnapshot(), cli.Error()
 }

 func (cli *socketClient) LoadSnapshotChunkSync(
     req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
     reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
-    cli.FlushSync()
+    if err := cli.FlushSync(); err != nil {
+        return nil, err
+    }
+
     return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
 }
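After this change every `*Sync` method reports a `FlushSync` failure through its error return instead of only via `cli.Error()`. A minimal usage sketch; the address and the `mustConnect` flag are illustrative assumptions, and a socket server is assumed to already be listening:

```go
package main

import (
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	// Assumes an ABCI socket server is already listening on this address.
	cli := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}

	// EchoSync now returns the FlushSync error directly, so a failed
	// flush is caught here rather than only through cli.Error().
	res, err := cli.EchoSync("ping")
	if err != nil {
		log.Fatalf("echo failed: %v", err)
	}
	fmt.Println(res.Message)
}
```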
@@ -43,14 +43,23 @@ func TestProperSyncCalls(t *testing.T) {
     app := slowApp{}

     s, c := setupClientServer(t, app)
-    defer s.Stop()
-    defer c.Stop()
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })
+    t.Cleanup(func() {
+        if err := c.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     resp := make(chan error, 1)
     go func() {
         // This is BeginBlockSync unrolled....
         reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
-        c.FlushSync()
+        err := c.FlushSync()
+        require.NoError(t, err)
         res := reqres.Response.GetBeginBlock()
         require.NotNil(t, res)
         resp <- c.Error()
@@ -69,8 +78,16 @@ func TestHangingSyncCalls(t *testing.T) {
     app := slowApp{}

     s, c := setupClientServer(t, app)
-    defer s.Stop()
-    defer c.Stop()
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Log(err)
+        }
+    })
+    t.Cleanup(func() {
+        if err := c.Stop(); err != nil {
+            t.Log(err)
+        }
+    })

     resp := make(chan error, 1)
     go func() {
@@ -81,7 +98,8 @@ func TestHangingSyncCalls(t *testing.T) {
         // no response yet from server
         time.Sleep(20 * time.Millisecond)
         // kill the server, so the connections break
-        s.Stop()
+        err := s.Stop()
+        require.NoError(t, err)

         // wait for the response from BeginBlock
         reqres.Wait()
@@ -642,7 +642,9 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
     // Stop upon receiving SIGTERM or CTRL-C.
     tmos.TrapSignal(logger, func() {
         // Cleanup
-        srv.Stop()
+        if err := srv.Stop(); err != nil {
+            logger.Error("Error while stopping server", "err", err)
+        }
     })

     // Run forever.
@@ -674,7 +676,9 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
     // Stop upon receiving SIGTERM or CTRL-C.
     tmos.TrapSignal(logger, func() {
         // Cleanup
-        srv.Stop()
+        if err := srv.Stop(); err != nil {
+            logger.Error("Error while stopping server", "err", err)
+        }
     })

     // Run forever.
@@ -56,7 +56,11 @@ func testStream(t *testing.T, app types.Application) {
     if err := server.Start(); err != nil {
         require.NoError(t, err, "Error starting socket server")
     }
-    defer server.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := server.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // Connect to the socket
     client := abcicli.NewSocketClient(socket, false)
@@ -64,7 +68,11 @@ func testStream(t *testing.T, app types.Application) {
     if err := client.Start(); err != nil {
         t.Fatalf("Error starting socket client: %v", err.Error())
     }
-    defer client.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := client.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     done := make(chan struct{})
     counter := 0
@@ -132,14 +140,24 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
     if err := server.Start(); err != nil {
         t.Fatalf("Error starting GRPC server: %v", err.Error())
     }
-    defer server.Stop() //nolint:errcheck // ignore for tests
+
+    t.Cleanup(func() {
+        if err := server.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // Connect to the socket
     conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
     if err != nil {
         t.Fatalf("Error dialing GRPC server: %v", err.Error())
     }
-    defer conn.Close()
+
+    t.Cleanup(func() {
+        if err := conn.Close(); err != nil {
+            t.Error(err)
+        }
+    })

     client := types.NewABCIApplicationClient(conn)
@@ -278,8 +278,16 @@ func TestClientServer(t *testing.T) {
     kvstore := NewApplication()
     client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
     require.NoError(t, err)
-    defer server.Stop() //nolint:errcheck // ignore for tests
-    defer client.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := server.Stop(); err != nil {
+            t.Error(err)
+        }
+    })
+    t.Cleanup(func() {
+        if err := client.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     runClientTests(t, client)
@@ -287,8 +295,17 @@ func TestClientServer(t *testing.T) {
     kvstore = NewApplication()
     gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
     require.NoError(t, err)
-    defer gserver.Stop() //nolint:errcheck // ignore for tests
-    defer gclient.Stop() //nolint:errcheck // ignore for tests
+
+    t.Cleanup(func() {
+        if err := gserver.Stop(); err != nil {
+            t.Error(err)
+        }
+    })
+    t.Cleanup(func() {
+        if err := gclient.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     runClientTests(t, gclient)
 }
@@ -90,7 +90,11 @@ func TestBlockPoolBasic(t *testing.T) {
         t.Error(err)
     }

-    defer pool.Stop()
+    t.Cleanup(func() {
+        if err := pool.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     peers.start()
     defer peers.stop()
@@ -144,7 +148,11 @@ func TestBlockPoolTimeout(t *testing.T) {
     if err != nil {
         t.Error(err)
     }
-    defer pool.Stop()
+    t.Cleanup(func() {
+        if err := pool.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     for _, peer := range peers {
         t.Logf("Peer %v", peer.id)
@@ -206,7 +214,11 @@ func TestBlockPoolRemovePeer(t *testing.T) {
     pool.SetLogger(log.TestingLogger())
     err := pool.Start()
     require.NoError(t, err)
-    defer pool.Stop()
+    t.Cleanup(func() {
+        if err := pool.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // add peers
     for peerID, peer := range peers {
@@ -10,6 +10,7 @@ import (
     "time"

     "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
     dbm "github.com/tendermint/tm-db"

     abci "github.com/tendermint/tendermint/abci/types"
@@ -387,7 +388,8 @@ func TestReactorHelperMode(t *testing.T) {
             reactor := newTestReactor(params)
             mockSwitch := &mockSwitchIo{switchedToConsensus: false}
             reactor.io = mockSwitch
-            reactor.Start()
+            err := reactor.Start()
+            require.NoError(t, err)

             for i := 0; i < len(tt.msgs); i++ {
                 step := tt.msgs[i]
@@ -415,7 +417,8 @@ func TestReactorHelperMode(t *testing.T) {
                     }
                 }
             }
-            reactor.Stop()
+            err = reactor.Stop()
+            require.NoError(t, err)
         })
     }
 }
@@ -195,7 +195,7 @@ LOOP:
         startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

         // stop consensus state and transactions sender (initFn)
-        cs.Stop()
+        cs.Stop() // Logging this error causes failure
         cancel()

         // if we reached the required height, exit
@@ -683,12 +683,12 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
         err = wal.Start()
         require.NoError(t, err)
         defer wal.Stop()

         chain, commits, err = makeBlockchainFromWAL(wal)
         require.NoError(t, err)
         pubKey, err := privVal.GetPubKey()
         require.NoError(t, err)
         stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)

     }
     store.chain = chain
     store.commits = commits
@@ -728,7 +728,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
     if err := proxyApp.Start(); err != nil {
         t.Fatalf("Error starting proxy app connections: %v", err)
     }

     defer proxyApp.Stop()

     err := handshaker.Handshake(proxyApp)
     if expectError {
         require.Error(t, err)
@@ -781,7 +783,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
     if err := proxyApp.Start(); err != nil {
         panic(err)
     }
-    defer proxyApp.Stop()
+    defer proxyApp.Stop() //nolint:errcheck // ignore

     state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
     validators := types.TM2PB.ValidatorUpdates(state.Validators)
@@ -830,7 +832,7 @@ func buildTMStateFromChain(
     if err := proxyApp.Start(); err != nil {
         panic(err)
     }
-    defer proxyApp.Stop()
+    defer proxyApp.Stop() //nolint:errcheck //ignore

     state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
     validators := types.TM2PB.ValidatorUpdates(state.Validators)
@@ -897,7 +899,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {

     assert.Panics(t, func() {
         h := NewHandshaker(stateDB, state, store, genDoc)
-        h.Handshake(proxyApp)
+        if err = h.Handshake(proxyApp); err != nil {
+            t.Log(err)
+        }
     })
 }
@@ -915,7 +919,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {

     assert.Panics(t, func() {
         h := NewHandshaker(stateDB, state, store, genDoc)
-        h.Handshake(proxyApp)
+        if err = h.Handshake(proxyApp); err != nil {
+            t.Log(err)
+        }
     })
 }
 }
@@ -1208,7 +1214,6 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
     if err := handshaker.Handshake(proxyApp); err != nil {
         t.Fatalf("Error on abci handshake: %v", err)
     }

     // reload the state, check the validator set was updated
     state = sm.LoadState(stateDB)
@@ -22,7 +22,9 @@ func TestSIGHUP(t *testing.T) {
     // First, create a temporary directory and move into it
     dir, err := ioutil.TempDir("", "sighup_test")
     require.NoError(t, err)
-    defer os.RemoveAll(dir)
+    t.Cleanup(func() {
+        os.RemoveAll(dir)
+    })
     err = os.Chdir(dir)
     require.NoError(t, err)
@@ -50,7 +52,8 @@ func TestSIGHUP(t *testing.T) {
     require.NoError(t, err)

     // Send SIGHUP to self.
-    syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
+    err = syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
+    require.NoError(t, err)

     // Wait a bit... signals are not handled synchronously.
     time.Sleep(time.Millisecond * 10)
@@ -59,8 +59,7 @@ func main() {
         os.Exit(1)
     }

-    err = group.Start()
-    if err != nil {
+    if err = group.Start(); err != nil {
         fmt.Printf("logjack couldn't start with file %v\n", headPath)
         os.Exit(1)
     }
@@ -69,10 +68,11 @@ func main() {
     buf := make([]byte, readBufferSize)
     for {
         n, err := os.Stdin.Read(buf)
-        group.Write(buf[:n])
-        group.FlushAndSync()
         if err != nil {
-            group.Stop()
+            if err := group.Stop(); err != nil {
+                fmt.Fprintf(os.Stderr, "logjack stopped with error %v\n", headPath)
+                os.Exit(1)
+            }
             if err == io.EOF {
                 os.Exit(0)
             } else {
@@ -80,6 +80,15 @@ func main() {
                 os.Exit(1)
             }
         }
+        _, err = group.Write(buf[:n])
+        if err != nil {
+            fmt.Fprintf(os.Stderr, "logjack failed write with error %v\n", headPath)
+            os.Exit(1)
+        }
+        if err := group.FlushAndSync(); err != nil {
+            fmt.Fprintf(os.Stderr, "logjack flushsync fail with error %v\n", headPath)
+            os.Exit(1)
+        }
     }
 }
@@ -53,7 +53,8 @@ func TestCheckHeadSizeLimit(t *testing.T) {
         err := g.WriteLine(tmrand.Str(999))
         require.NoError(t, err, "Error appending to head")
     }
-    g.FlushAndSync()
+    err := g.FlushAndSync()
+    require.NoError(t, err)
     assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000)

     // Even calling checkHeadSizeLimit manually won't rotate it.
@@ -61,9 +62,10 @@ func TestCheckHeadSizeLimit(t *testing.T) {
     assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000)

     // Write 1000 more bytes.
-    err := g.WriteLine(tmrand.Str(999))
+    err = g.WriteLine(tmrand.Str(999))
     require.NoError(t, err, "Error appending to head")
-    g.FlushAndSync()
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     // Calling checkHeadSizeLimit this time rolls it.
     g.checkHeadSizeLimit()
@@ -72,7 +74,8 @@ func TestCheckHeadSizeLimit(t *testing.T) {
     // Write 1000 more bytes.
     err = g.WriteLine(tmrand.Str(999))
     require.NoError(t, err, "Error appending to head")
-    g.FlushAndSync()
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     // Calling checkHeadSizeLimit does nothing.
     g.checkHeadSizeLimit()
@@ -83,7 +86,8 @@ func TestCheckHeadSizeLimit(t *testing.T) {
         err = g.WriteLine(tmrand.Str(999))
         require.NoError(t, err, "Error appending to head")
     }
-    g.FlushAndSync()
+    err = g.FlushAndSync()
+    require.NoError(t, err)
     assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000)

     // Calling checkHeadSizeLimit rolls it again.
@@ -93,7 +97,8 @@ func TestCheckHeadSizeLimit(t *testing.T) {
     // Write 1000 more bytes.
     _, err = g.Head.Write([]byte(tmrand.Str(999) + "\n"))
     require.NoError(t, err, "Error appending to head")
-    g.FlushAndSync()
+    err = g.FlushAndSync()
+    require.NoError(t, err)
    assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)

     // Calling checkHeadSizeLimit does nothing.
@@ -111,7 +116,11 @@ func TestRotateFile(t *testing.T) {
     // relative paths are resolved at Group creation
     origDir, err := os.Getwd()
     require.NoError(t, err)
-    defer os.Chdir(origDir)
+    defer func() {
+        if err := os.Chdir(origDir); err != nil {
+            t.Error(err)
+        }
+    }()

     dir, err := ioutil.TempDir("", "rotate_test")
     require.NoError(t, err)
@@ -123,15 +132,23 @@ func TestRotateFile(t *testing.T) {
     require.True(t, filepath.IsAbs(g.Dir))

     // Create and rotate files
-    g.WriteLine("Line 1")
-    g.WriteLine("Line 2")
-    g.WriteLine("Line 3")
-    g.FlushAndSync()
+    err = g.WriteLine("Line 1")
+    require.NoError(t, err)
+    err = g.WriteLine("Line 2")
+    require.NoError(t, err)
+    err = g.WriteLine("Line 3")
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)
     g.RotateFile()
-    g.WriteLine("Line 4")
-    g.WriteLine("Line 5")
-    g.WriteLine("Line 6")
-    g.FlushAndSync()
+    err = g.WriteLine("Line 4")
+    require.NoError(t, err)
+    err = g.WriteLine("Line 5")
+    require.NoError(t, err)
+    err = g.WriteLine("Line 6")
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     // Read g.Head.Path+"000"
     body1, err := ioutil.ReadFile(g.Head.Path + ".000")
@@ -160,8 +177,10 @@ func TestWrite(t *testing.T) {
     g := createTestGroupWithHeadSizeLimit(t, 0)

     written := []byte("Medusa")
-    g.Write(written)
-    g.FlushAndSync()
+    _, err := g.Write(written)
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     read := make([]byte, len(written))
     gr, err := g.NewReader(0)
@@ -181,12 +200,16 @@ func TestGroupReaderRead(t *testing.T) {
     g := createTestGroupWithHeadSizeLimit(t, 0)

     professor := []byte("Professor Monster")
-    g.Write(professor)
-    g.FlushAndSync()
+    _, err := g.Write(professor)
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)
     g.RotateFile()
     frankenstein := []byte("Frankenstein's Monster")
-    g.Write(frankenstein)
-    g.FlushAndSync()
+    _, err = g.Write(frankenstein)
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     totalWrittenLength := len(professor) + len(frankenstein)
     read := make([]byte, totalWrittenLength)
@@ -210,13 +233,17 @@ func TestGroupReaderRead2(t *testing.T) {
     g := createTestGroupWithHeadSizeLimit(t, 0)

     professor := []byte("Professor Monster")
-    g.Write(professor)
-    g.FlushAndSync()
+    _, err := g.Write(professor)
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)
     g.RotateFile()
     frankenstein := []byte("Frankenstein's Monster")
     frankensteinPart := []byte("Frankenstein")
-    g.Write(frankensteinPart) // note writing only a part
-    g.FlushAndSync()
+    _, err = g.Write(frankensteinPart) // note writing only a part
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)

     totalLength := len(professor) + len(frankenstein)
     read := make([]byte, totalLength)
@@ -251,8 +278,10 @@ func TestMaxIndex(t *testing.T) {
     assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")

-    g.WriteLine("Line 1")
-    g.FlushAndSync()
+    err := g.WriteLine("Line 1")
+    require.NoError(t, err)
+    err = g.FlushAndSync()
+    require.NoError(t, err)
     g.RotateFile()

     assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file")
@@ -17,7 +17,11 @@ func TestAddListenerForEventFireOnce(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     messages := make(chan EventData)
     err = evsw.AddListenerForEvent("listener", "event",
@@ -40,7 +44,11 @@ func TestAddListenerForEventFireMany(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     doneSum := make(chan uint64)
     doneSending := make(chan uint64)
@@ -70,7 +78,11 @@ func TestAddListenerForDifferentEvents(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     doneSum := make(chan uint64)
     doneSending1 := make(chan uint64)
@@ -118,7 +130,12 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     doneSum1 := make(chan uint64)
     doneSum2 := make(chan uint64)
@@ -185,7 +202,11 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     done1 := make(chan struct{})
     done2 := make(chan struct{})
@@ -231,7 +252,11 @@ func TestAddAndRemoveListener(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     doneSum1 := make(chan uint64)
     doneSum2 := make(chan uint64)
@@ -278,7 +303,11 @@ func TestRemoveListener(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     count := 10
     sum1, sum2 := 0, 0
@@ -335,7 +364,11 @@ func TestRemoveListenersAsync(t *testing.T) {
     evsw := NewEventSwitch()
     err := evsw.Start()
     require.NoError(t, err)
-    defer evsw.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := evsw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     doneSum1 := make(chan uint64)
     doneSum2 := make(chan uint64)
@@ -15,8 +15,13 @@ import (
 func TestExample(t *testing.T) {
     s := pubsub.NewServer()
     s.SetLogger(log.TestingLogger())
-    s.Start()
-    defer s.Stop()
+    err := s.Start()
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"))
@@ -25,7 +25,11 @@ func TestSubscribe(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
@@ -66,7 +70,11 @@ func TestSubscribeWithCapacity(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     assert.Panics(t, func() {
@@ -89,7 +97,11 @@ func TestSubscribeUnbuffered(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{})
@@ -120,7 +132,11 @@ func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
@@ -138,7 +154,11 @@ func TestDifferentClients(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"))
@@ -178,7 +198,11 @@ func TestSubscribeDuplicateKeys(t *testing.T) {
     s := pubsub.NewServer()
     s.SetLogger(log.TestingLogger())
     require.NoError(t, s.Start())
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     testCases := []struct {
         query string
@@ -229,7 +253,11 @@ func TestClientSubscribesTwice(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     q := query.MustParse("tm.events.type='NewBlock'")
@@ -254,7 +282,11 @@ func TestUnsubscribe(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
@@ -274,7 +306,11 @@ func TestClientUnsubscribesTwice(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     _, err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
@@ -293,7 +329,11 @@ func TestResubscribe(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     _, err = s.Subscribe(ctx, clientID, query.Empty{})
@@ -313,7 +353,11 @@ func TestUnsubscribeAll(t *testing.T) {
     s.SetLogger(log.TestingLogger())
     err := s.Start()
     require.NoError(t, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    t.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     ctx := context.Background()
     subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
@@ -365,7 +409,12 @@ func benchmarkNClients(n int, b *testing.B) {
     s := pubsub.NewServer()
     err := s.Start()
     require.NoError(b, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+
+    b.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            b.Error(err)
+        }
+    })

     ctx := context.Background()
     for i := 0; i < n; i++ {
@@ -405,7 +454,11 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
     s := pubsub.NewServer()
     err := s.Start()
     require.NoError(b, err)
-    defer s.Stop() //nolint:errcheck // ignore for tests
+    b.Cleanup(func() {
+        if err := s.Stop(); err != nil {
+            b.Error(err)
+        }
+    })

     ctx := context.Background()
     q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1")
@@ -9,6 +9,8 @@ import (
     dbm "github.com/tendermint/tm-db"

+    "github.com/tendermint/tendermint/crypto"
+    tmrand "github.com/tendermint/tendermint/libs/rand"
     "github.com/tendermint/tendermint/types"
 )
@@ -149,18 +151,38 @@ func Test_Concurrency(t *testing.T) {
         go func(i int64) {
             defer wg.Done()

-            dbStore.SaveSignedHeaderAndValidatorSet(
-                &types.SignedHeader{Header: &types.Header{Height: i}}, vals)
+            err := dbStore.SaveSignedHeaderAndValidatorSet(
+                &types.SignedHeader{Header: &types.Header{Height: i,
+                    ProposerAddress: tmrand.Bytes(crypto.AddressSize)}}, vals)
+            require.NoError(t, err)

-            dbStore.SignedHeader(i)
-            dbStore.ValidatorSet(i)
-            dbStore.LastSignedHeaderHeight()
-            dbStore.FirstSignedHeaderHeight()
+            _, err = dbStore.SignedHeader(i)
+            if err != nil {
+                t.Log(err)
+            }
+            _, err = dbStore.ValidatorSet(i)
+            if err != nil {
+                t.Log(err) // could not find validator set
+            }
+            _, err = dbStore.LastSignedHeaderHeight()
+            if err != nil {
+                t.Log(err)
+            }
+            _, err = dbStore.FirstSignedHeaderHeight()
+            if err != nil {
+                t.Log(err)
+            }

-            dbStore.Prune(2)
+            err = dbStore.Prune(2)
+            if err != nil {
+                t.Log(err)
+            }
             _ = dbStore.Size()

-            dbStore.DeleteSignedHeaderAndValidatorSet(1)
+            err = dbStore.DeleteSignedHeaderAndValidatorSet(1)
+            if err != nil {
+                t.Log(err)
+            }
         }(int64(i))
     }
node/node.go
@@ -1269,7 +1269,9 @@ func LoadStateFromDBOrGenesisDocProvider(
         }
         // save genesis doc to prevent a certain class of user errors (e.g. when it
         // was changed, accidentally or not). Also good for audit trail.
-        saveGenesisDoc(stateDB, genDoc)
+        if err := saveGenesisDoc(stateDB, genDoc); err != nil {
+            return sm.State{}, nil, err
+        }
     }
     state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
     if err != nil {
@@ -1296,12 +1298,16 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
 }

 // panics if failed to marshal the given genesis document
-func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
+func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
     b, err := tmjson.Marshal(genDoc)
     if err != nil {
-        panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
+        return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
     }
-    db.SetSync(genesisDocKey, b)
+    if err := db.SetSync(genesisDocKey, b); err != nil {
+        return err
+    }
+
+    return nil
 }

 func createAndStartPrivValidatorSocketClient(
@@ -437,8 +437,6 @@ func expectSend(ch chan struct{}) bool {
 func TestMConnectionReadErrorBadEncoding(t *testing.T) {
     chOnErr := make(chan struct{})
     mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
-    defer mconnClient.Stop() // nolint:errcheck // ignore for tests
-    defer mconnServer.Stop() // nolint:errcheck // ignore for tests

     client := mconnClient.conn
@@ -446,13 +444,23 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) {
     _, err := client.Write([]byte{1, 2, 3, 4, 5})
     require.NoError(t, err)
     assert.True(t, expectSend(chOnErr), "badly encoded msgPacket")
+
+    t.Cleanup(func() {
+        if err := mconnClient.Stop(); err != nil {
+            t.Log(err)
+        }
+    })
+
+    t.Cleanup(func() {
+        if err := mconnServer.Stop(); err != nil {
+            t.Log(err)
+        }
+    })
 }

 func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
     chOnErr := make(chan struct{})
     mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
-    defer mconnClient.Stop()
-    defer mconnServer.Stop()

     msg := []byte("Ant-Man")
@@ -463,6 +471,18 @@ func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
     // should cause an error
     assert.True(t, mconnClient.Send(0x02, msg))
     assert.True(t, expectSend(chOnErr), "unknown channel")
+
+    t.Cleanup(func() {
+        if err := mconnClient.Stop(); err != nil {
+            t.Log(err)
+        }
+    })
+
+    t.Cleanup(func() {
+        if err := mconnServer.Stop(); err != nil {
+            t.Log(err)
+        }
+    })
 }

 func TestMConnectionReadErrorLongMessage(t *testing.T) {
@@ -528,7 +548,8 @@ func TestMConnectionTrySend(t *testing.T) {
     msg := []byte("Semicolon-Woman")
     resultCh := make(chan string, 2)
     assert.True(t, mconn.TrySend(0x01, msg))
-    server.Read(make([]byte, len(msg)))
+    _, err = server.Read(make([]byte, len(msg)))
+    require.NoError(t, err)
     assert.True(t, mconn.CanSend(0x01))
     assert.True(t, mconn.TrySend(0x01, msg))
     assert.False(t, mconn.CanSend(0x01))
@@ -228,7 +228,8 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) {
     if *update {
         t.Logf("Updating golden test vector file %s", goldenFilepath)
         data := createGoldenTestVectors(t)
-        tmos.WriteFile(goldenFilepath, []byte(data), 0644)
+        err := tmos.WriteFile(goldenFilepath, []byte(data), 0644)
+        require.NoError(t, err)
     }
     f, err := os.Open(goldenFilepath)
     if err != nil {
@@ -263,7 +264,7 @@ func TestNilPubkey(t *testing.T) {
     var fooPrvKey = ed25519.GenPrivKey()
     var barPrvKey = privKeyWithNilPubKey{ed25519.GenPrivKey()}

-    go MakeSecretConnection(fooConn, fooPrvKey)
+    go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests

     _, err := MakeSecretConnection(barConn, barPrvKey)
     require.Error(t, err)
@@ -277,7 +278,7 @@ func TestNonEd25519Pubkey(t *testing.T) {
     var fooPrvKey = ed25519.GenPrivKey()
     var barPrvKey = secp256k1.GenPrivKey()

-    go MakeSecretConnection(fooConn, fooPrvKey)
+    go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests

     _, err := MakeSecretConnection(barConn, barPrvKey)
     require.Error(t, err)
@@ -200,7 +200,9 @@ func (p *peer) FlushStop() {
 func (p *peer) OnStop() {
     p.metricsTicker.Stop()
     p.BaseService.OnStop()
-    p.mconn.Stop() // stop everything and close the conn
+    if err := p.mconn.Stop(); err != nil { // stop everything and close the conn
+        p.Logger.Error("Error while stopping peer", "err", err)
+    }
 }

 //---------------------------------------------------
@@ -35,7 +35,8 @@ func TestAddrBookPickAddress(t *testing.T) {

     randAddrs := randNetAddressPairs(t, 1)
     addrSrc := randAddrs[0]
-    book.AddAddress(addrSrc.addr, addrSrc.src)
+    err := book.AddAddress(addrSrc.addr, addrSrc.src)
+    require.NoError(t, err)

     // pick an address when we only have new address
     addr = book.PickAddress(0)
@@ -68,7 +69,8 @@ func TestAddrBookSaveLoad(t *testing.T) {

     book = NewAddrBook(fname, true)
     book.SetLogger(log.TestingLogger())
-    book.Start()
+    err := book.Start()
+    require.NoError(t, err)

     assert.True(t, book.Empty())
@@ -76,7 +78,8 @@ func TestAddrBookSaveLoad(t *testing.T) {
     randAddrs := randNetAddressPairs(t, 100)

     for _, addrSrc := range randAddrs {
-        book.AddAddress(addrSrc.addr, addrSrc.src)
+        err := book.AddAddress(addrSrc.addr, addrSrc.src)
+        require.NoError(t, err)
     }

     assert.Equal(t, 100, book.Size())
@@ -84,7 +87,8 @@ func TestAddrBookSaveLoad(t *testing.T) {

     book = NewAddrBook(fname, true)
     book.SetLogger(log.TestingLogger())
-    book.Start()
+    err = book.Start()
+    require.NoError(t, err)

     assert.Equal(t, 100, book.Size())
 }
@@ -100,7 +104,8 @@ func TestAddrBookLookup(t *testing.T) {
     for _, addrSrc := range randAddrs {
         addr := addrSrc.addr
         src := addrSrc.src
-        book.AddAddress(addr, src)
+        err := book.AddAddress(addr, src)
+        require.NoError(t, err)

         ka := book.HasAddress(addr)
         assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr)
@@ -116,7 +121,8 @@ func TestAddrBookPromoteToOld(t *testing.T) {
     book := NewAddrBook(fname, true)
     book.SetLogger(log.TestingLogger())
     for _, addrSrc := range randAddrs {
-        book.AddAddress(addrSrc.addr, addrSrc.src)
+        err := book.AddAddress(addrSrc.addr, addrSrc.src)
+        require.NoError(t, err)
     }

     // Attempt all addresses.
@@ -161,9 +167,12 @@ func TestAddrBookHandlesDuplicates(t *testing.T) {

     differentSrc := randIPv4Address(t)
     for _, addrSrc := range randAddrs {
-        book.AddAddress(addrSrc.addr, addrSrc.src)
-        book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
-        book.AddAddress(addrSrc.addr, differentSrc) // different src
+        err := book.AddAddress(addrSrc.addr, addrSrc.src)
+        require.NoError(t, err)
+        err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
+        require.NoError(t, err)
+        err = book.AddAddress(addrSrc.addr, differentSrc) // different src
+        require.NoError(t, err)
     }

     assert.Equal(t, 100, book.Size())
@@ -209,7 +218,8 @@ func TestAddrBookRemoveAddress(t *testing.T) {
     book.SetLogger(log.TestingLogger())

     addr := randIPv4Address(t)
-    book.AddAddress(addr, addr)
+    err := book.AddAddress(addr, addr)
+    require.NoError(t, err)
     assert.Equal(t, 1, book.Size())

     book.RemoveAddress(addr)
@@ -260,7 +270,8 @@ func TestAddrBookGetSelection(t *testing.T) {

     // 2) add one address
     addr := randIPv4Address(t)
-    book.AddAddress(addr, addr)
+    err := book.AddAddress(addr, addr)
+    require.NoError(t, err)

     assert.Equal(t, 1, len(book.GetSelection()))
     assert.Equal(t, addr, book.GetSelection()[0])
@@ -268,7 +279,8 @@ func TestAddrBookGetSelection(t *testing.T) {
     // 3) add a bunch of addresses
     randAddrs := randNetAddressPairs(t, 100)
     for _, addrSrc := range randAddrs {
-        book.AddAddress(addrSrc.addr, addrSrc.src)
+        err := book.AddAddress(addrSrc.addr, addrSrc.src)
+        require.NoError(t, err)
     }

     // check there is no duplicates
@@ -301,7 +313,8 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {

     // 2) add one address
     addr := randIPv4Address(t)
-    book.AddAddress(addr, addr)
+    err := book.AddAddress(addr, addr)
+    require.NoError(t, err)

     selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
     assert.Equal(t, 1, len(selection))
@@ -310,7 +323,8 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {
     // 3) add a bunch of addresses
     randAddrs := randNetAddressPairs(t, 100)
     for _, addrSrc := range randAddrs {
-        book.AddAddress(addrSrc.addr, addrSrc.src)
+        err := book.AddAddress(addrSrc.addr, addrSrc.src)
+        require.NoError(t, err)
     }

     // check there is no duplicates
@@ -376,7 +390,8 @@ func TestAddrBookHasAddress(t *testing.T) {
     book := NewAddrBook(fname, true)
     book.SetLogger(log.TestingLogger())
     addr := randIPv4Address(t)
-    book.AddAddress(addr, addr)
+    err := book.AddAddress(addr, addr)
+    require.NoError(t, err)

     assert.True(t, book.HasAddress(addr))
@@ -442,7 +457,8 @@ func TestAddrBookEmpty(t *testing.T) {
     require.True(t, book.Empty())

     // Check that book with address is not empty
-    book.AddAddress(randIPv4Address(t), randIPv4Address(t))
+    err := book.AddAddress(randIPv4Address(t), randIPv4Address(t))
+    require.NoError(t, err)
     require.False(t, book.Empty())
 }
@@ -675,13 +691,15 @@ func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *add

     randAddrs := randNetAddressPairs(t, nOld)
     for _, addr := range randAddrs {
-        book.AddAddress(addr.addr, addr.src)
+        err := book.AddAddress(addr.addr, addr.src)
+        require.NoError(t, err)
         book.MarkGood(addr.addr.ID)
     }

     randAddrs = randNetAddressPairs(t, nNew)
     for _, addr := range randAddrs {
-        book.AddAddress(addr.addr, addr.src)
+        err := book.AddAddress(addr.addr, addr.src)
+        require.NoError(t, err)
     }

     return
@@ -117,8 +117,16 @@ func initSwitchFunc(i int, sw *Switch) *Switch {

 func TestSwitches(t *testing.T) {
     s1, s2 := MakeSwitchPair(t, initSwitchFunc)
-    defer s1.Stop()
-    defer s2.Stop()
+    t.Cleanup(func() {
+        if err := s1.Stop(); err != nil {
+            t.Error(err)
+        }
+    })
+    t.Cleanup(func() {
+        if err := s2.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     if s1.Peers().Size() != 1 {
         t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size())
@@ -219,12 +227,17 @@ func TestSwitchPeerFilter(t *testing.T) {
             SwitchPeerFilters(filters...),
         )
     )
-    defer sw.Stop()
-    sw.Start()
+    err := sw.Start()
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // simulate remote peer
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
     rp.Start()
-    defer rp.Stop()
+    t.Cleanup(rp.Stop)

     p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
         chDescs: sw.chDescs,
@@ -264,7 +277,12 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
             SwitchPeerFilters(filters...),
         )
     )
-    defer sw.Stop()
-    sw.Start()
+    err := sw.Start()
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Log(err)
+        }
+    })

     // simulate remote peer
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -289,8 +307,13 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) {
 func TestSwitchPeerFilterDuplicate(t *testing.T) {
     sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-    sw.Start()
-    defer sw.Stop()
+    err := sw.Start()
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // simulate remote peer
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -336,7 +359,11 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
     if err != nil {
         t.Error(err)
     }
-    defer sw.Stop()
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // simulate remote peer
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -404,7 +431,11 @@ func TestSwitchStopPeerForError(t *testing.T) {

     // stop sw2. this should cause the p to fail,
     // which results in calling StopPeerForError internally
-    sw2.Stop()
+    t.Cleanup(func() {
+        if err := sw2.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // now call StopPeerForError explicitly, eg. from a reactor
     sw1.StopPeerForError(p, fmt.Errorf("some err"))
@@ -417,7 +448,11 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
     sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
     err := sw.Start()
     require.NoError(t, err)
-    defer sw.Stop()
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // 1. simulate failure by closing connection
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -462,7 +497,11 @@ func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) {
     sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
     err := sw.Start()
     require.NoError(t, err)
-    defer sw.Stop()
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // 1. simulate failure by closing the connection
     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -491,7 +530,11 @@ func TestSwitchDialPeersAsync(t *testing.T) {
     sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
     err := sw.Start()
     require.NoError(t, err)
-    defer sw.Stop()
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
     rp.Start()
@@ -517,7 +560,12 @@ func TestSwitchFullConnectivity(t *testing.T) {
     switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
     defer func() {
         for _, sw := range switches {
-            sw.Stop()
+            sw := sw
+            t.Cleanup(func() {
+                if err := sw.Stop(); err != nil {
+                    t.Error(err)
+                }
+            })
         }
     }()
@@ -549,7 +597,11 @@ func TestSwitchAcceptRoutine(t *testing.T) {
     sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
     err := sw.Start()
     require.NoError(t, err)
-    defer sw.Stop()
+    t.Cleanup(func() {
+        if err := sw.Stop(); err != nil {
+            t.Error(err)
+        }
+    })

     // 0. check there are no peers
     assert.Equal(t, 0, sw.Peers().Size())
@@ -653,7 +705,8 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
     assert.NotPanics(t, func() {
         err := sw.Start()
        assert.NoError(t, err)
-        sw.Stop()
+        err = sw.Stop()
+        require.NoError(t, err)
     })
 }
@@ -739,8 +792,18 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
         }, false))
         return sw
     })
-    defer s1.Stop()
-    defer s2.Stop()
+
+    b.Cleanup(func() {
+        if err := s1.Stop(); err != nil {
+            b.Error(err)
+        }
+    })
+
+    b.Cleanup(func() {
+        if err := s2.Stop(); err != nil {
+            b.Error(err)
+        }
+    })

     // Allow time for goroutines to boot up
     time.Sleep(1 * time.Second)
@@ -5,11 +5,13 @@ import (
     "time"

     "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
 )

 func TestTrustMetricScores(t *testing.T) {
     tm := NewMetric()
-    tm.Start()
+    err := tm.Start()
+    require.NoError(t, err)

     // Perfect score
     tm.GoodEvents(1)
@@ -20,7 +22,8 @@ func TestTrustMetricScores(t *testing.T) {
     tm.BadEvents(10)
     score = tm.TrustScore()
     assert.NotEqual(t, 100, score)
-    tm.Stop()
+    err = tm.Stop()
+    require.NoError(t, err)
 }

 func TestTrustMetricConfig(t *testing.T) {
@@ -32,7 +35,8 @@ func TestTrustMetricConfig(t *testing.T) {
     }

     tm := NewMetricWithConfig(config)
-    tm.Start()
+    err := tm.Start()
+    require.NoError(t, err)

     // The max time intervals should be the TrackingWindow / IntervalLen
     assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals)
@@ -41,18 +45,21 @@ func TestTrustMetricConfig(t *testing.T) {
     // These weights should still be the default values
     assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight)
     assert.Equal(t, dc.IntegralWeight, tm.integralWeight)
-    tm.Stop()
+    err = tm.Stop()
+    require.NoError(t, err)
     tm.Wait()

     config.ProportionalWeight = 0.3
     config.IntegralWeight = 0.7
     tm = NewMetricWithConfig(config)
-    tm.Start()
+    err = tm.Start()
+    require.NoError(t, err)

     // These weights should be equal to our custom values
     assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight)
     assert.Equal(t, config.IntegralWeight, tm.integralWeight)
-    tm.Stop()
+    err = tm.Stop()
+    require.NoError(t, err)
     tm.Wait()
 }
@@ -72,7 +79,8 @@ func _TestTrustMetricStopPause(t *testing.T) {
     tt := NewTestTicker()
     tm := NewMetric()
     tm.SetTicker(tt)
-    tm.Start()
+    err := tm.Start()
+    require.NoError(t, err)
     // Allow some time intervals to pass and pause
     tt.NextTick()
     tt.NextTick()
@@ -91,7 +99,8 @@ func _TestTrustMetricStopPause(t *testing.T) {
     // Allow some time intervals to pass and stop
     tt.NextTick()
     tt.NextTick()
-    tm.Stop()
+    err = tm.Stop()
+    require.NoError(t, err)
     tm.Wait()

     second := tm.Copy().numIntervals
@@ -31,7 +31,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
|
||||
// Load the data from the file
|
||||
store = NewTrustMetricStore(historyDB, DefaultConfig())
|
||||
store.SetLogger(log.TestingLogger())
|
||||
store.Start()
|
||||
err = store.Start()
|
||||
require.NoError(t, err)
|
||||
// Make sure we still have 0 entries
|
||||
assert.Zero(t, store.Size())
|
||||
|
||||
@@ -48,7 +49,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
|
||||
tm := NewMetric()
|
||||
|
||||
tm.SetTicker(tt[i])
|
||||
tm.Start()
|
||||
err = tm.Start()
|
||||
require.NoError(t, err)
|
||||
store.AddPeerTrustMetric(key, tm)
|
||||
|
||||
tm.BadEvents(10)
|
||||
@@ -62,12 +64,14 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
|
||||
tt[i].NextTick()
|
||||
}
|
||||
// Stop all the trust metrics and save
|
||||
store.Stop()
|
||||
err = store.Stop()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Load the data from the DB
|
||||
store = NewTrustMetricStore(historyDB, DefaultConfig())
|
||||
store.SetLogger(log.TestingLogger())
|
||||
store.Start()
|
||||
err = store.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that we still have 100 peers with imperfect trust values
|
||||
assert.Equal(t, 100, store.Size())
|
||||
@@ -75,7 +79,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
|
||||
assert.NotEqual(t, 1.0, tm.TrustValue())
|
||||
}
|
||||
|
||||
store.Stop()
|
||||
err = store.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestTrustMetricStoreConfig(t *testing.T) {
|
||||
@@ -90,7 +95,8 @@ func TestTrustMetricStoreConfig(t *testing.T) {
// Create a store with custom config
store := NewTrustMetricStore(historyDB, config)
store.SetLogger(log.TestingLogger())
store.Start()
err = store.Start()
require.NoError(t, err)

// Have the store make us a metric with the config
tm := store.GetPeerTrustMetric("TestKey")
@@ -98,7 +104,8 @@ func TestTrustMetricStoreConfig(t *testing.T) {
// Check that the options made it to the metric
assert.Equal(t, 0.5, tm.proportionalWeight)
assert.Equal(t, 0.5, tm.integralWeight)
tm.Stop()
err = tm.Stop()
require.NoError(t, err)
}

func TestTrustMetricStoreLookup(t *testing.T) {
@@ -107,7 +114,8 @@ func TestTrustMetricStoreLookup(t *testing.T) {

store := NewTrustMetricStore(historyDB, DefaultConfig())
store.SetLogger(log.TestingLogger())
store.Start()
err = store.Start()
require.NoError(t, err)

// Create 100 peers in the trust metric store
for i := 0; i < 100; i++ {
@@ -119,7 +127,8 @@ func TestTrustMetricStoreLookup(t *testing.T) {
assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key)
}

store.Stop()
err = store.Stop()
require.NoError(t, err)
}

func TestTrustMetricStorePeerScore(t *testing.T) {
@@ -128,7 +137,8 @@ func TestTrustMetricStorePeerScore(t *testing.T) {

store := NewTrustMetricStore(historyDB, DefaultConfig())
store.SetLogger(log.TestingLogger())
store.Start()
err = store.Start()
require.NoError(t, err)

key := "TestKey"
tm := store.GetPeerTrustMetric(key)
@@ -152,5 +162,6 @@ func TestTrustMetricStorePeerScore(t *testing.T) {
// We will remember our experiences with this peer
tm = store.GetPeerTrustMetric(key)
assert.NotEqual(t, 100, tm.TrustScore())
store.Stop()
err = store.Stop()
require.NoError(t, err)
}

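The store tests mix the two testify packages deliberately: `require.NoError` aborts the test immediately (there is no point asserting against a store that never started), while `assert.*` records a failure and lets the remaining checks run. A standalone illustration:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRequireVsAssert(t *testing.T) {
	size, err := 100, error(nil) // stand-ins for store.Size() and store.Start()

	// Hard precondition: abort the test right here if startup failed.
	require.NoError(t, err)

	// Soft checks: on failure the test is marked failed but keeps running,
	// so every broken expectation is reported in one pass.
	assert.Equal(t, 100, size)
	assert.NotZero(t, size)
}
```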
@@ -65,8 +65,17 @@ func TestSignerClose(t *testing.T) {

func TestSignerPing(t *testing.T) {
for _, tc := range getSignerTestCases(t) {
defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

err := tc.signerClient.Ping()
assert.NoError(t, err)
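Two idioms recur through the signer hunks. `tc := tc` re-binds the loop variable so each closure captures its own test case (required before Go 1.22's per-iteration loop variables), and `t.Cleanup` replaces `defer`, which inside a loop only runs when the whole test function returns and, written as `defer tc.signerServer.Stop()`, discarded the error entirely. A standalone sketch with a hypothetical `fakeServer`:

```go
package example

import "testing"

type fakeServer struct{ name string }

func (s *fakeServer) Stop() error { return nil }

func TestCleanupPerCase(t *testing.T) {
	for _, srv := range []*fakeServer{{name: "a"}, {name: "b"}} {
		srv := srv // each cleanup closure must see its own server
		t.Cleanup(func() {
			// Runs when the test ends; the error is reported rather than
			// dropped as it was with `defer srv.Stop()`.
			if err := srv.Stop(); err != nil {
				t.Error(err)
			}
		})
		_ = srv.name // ... exercise srv here ...
	}
}
```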
@@ -75,8 +84,17 @@ func TestSignerPing(t *testing.T) {

func TestSignerGetPubKey(t *testing.T) {
for _, tc := range getSignerTestCases(t) {
defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

pubKey, err := tc.signerClient.GetPubKey()
require.NoError(t, err)
@@ -116,8 +134,17 @@ func TestSignerProposal(t *testing.T) {
Timestamp: ts,
}

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want.ToProto()))
require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have.ToProto()))
@@ -151,8 +178,17 @@ func TestSignerVote(t *testing.T) {
ValidatorIndex: 1,
}

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto()))
require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto()))
@@ -186,8 +222,17 @@ func TestSignerVoteResetDeadline(t *testing.T) {
ValidatorIndex: 1,
}

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

time.Sleep(testTimeoutReadWrite2o3)

@@ -231,8 +276,17 @@ func TestSignerVoteKeepAlive(t *testing.T) {
ValidatorIndex: 1,
}

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

// Check that even if the client does not request a
// signature for a long time, the service is still available
@@ -256,8 +310,17 @@ func TestSignerSignProposalErrors(t *testing.T) {
tc.signerServer.privVal = types.NewErroringMockPV()
tc.mockPV = types.NewErroringMockPV()

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

ts := time.Now()
hash := tmrand.Bytes(tmhash.Size)
@@ -302,8 +365,17 @@ func TestSignerSignVoteErrors(t *testing.T) {
tc.signerServer.privVal = types.NewErroringMockPV()
tc.mockPV = types.NewErroringMockPV()

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

err := tc.signerClient.SignVote(tc.chainID, vote.ToProto())
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
@@ -345,8 +417,17 @@ func TestSignerUnexpectedResponse(t *testing.T) {

tc.signerServer.SetRequestHandler(brokenHandler)

defer tc.signerServer.Stop()
defer tc.signerClient.Close()
tc := tc
t.Cleanup(func() {
if err := tc.signerServer.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := tc.signerClient.Close(); err != nil {
t.Error(err)
}
})

ts := time.Now()
want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType}

@@ -55,7 +55,11 @@ func TestEcho(t *testing.T) {
if err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
t.Cleanup(func() {
if err := s.Stop(); err != nil {
t.Error(err)
}
})

// Start client
cli, err := clientCreator.NewABCIClient()
@@ -89,7 +93,11 @@ func BenchmarkEcho(b *testing.B) {
if err := s.Start(); err != nil {
b.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
b.Cleanup(func() {
if err := s.Stop(); err != nil {
b.Error(err)
}
})

// Start client
cli, err := clientCreator.NewABCIClient()
@@ -128,7 +136,11 @@ func TestInfo(t *testing.T) {
if err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
t.Cleanup(func() {
if err := s.Stop(); err != nil {
t.Error(err)
}
})

// Start client
cli, err := clientCreator.NewABCIClient()

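The Echo/Info hunks repeat the identical cleanup once with `*testing.T` and once with `*testing.B`. Both types satisfy `testing.TB`, so a shared helper could register the checked stop in one place; a hypothetical sketch, not part of this PR:

```go
package example

import "testing"

// stoppable matches the socket server used above.
type stoppable interface{ Stop() error }

// stopOnCleanup registers a checked Stop for tests and benchmarks alike.
func stopOnCleanup(tb testing.TB, s stoppable) {
	tb.Helper()
	tb.Cleanup(func() {
		if err := s.Stop(); err != nil {
			tb.Error(err)
		}
	})
}
```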
@@ -17,7 +17,11 @@ func TestUnsafeDialSeeds(t *testing.T) {
func(n int, sw *p2p.Switch) *p2p.Switch { return sw })
err := sw.Start()
require.NoError(t, err)
defer sw.Stop()
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})

env.Logger = log.TestingLogger()
env.P2PPeers = sw
@@ -47,7 +51,11 @@ func TestUnsafeDialPeers(t *testing.T) {
func(n int, sw *p2p.Switch) *p2p.Switch { return sw })
err := sw.Start()
require.NoError(t, err)
defer sw.Stop()
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})

env.Logger = log.TestingLogger()
env.P2PPeers = sw

@@ -287,7 +287,8 @@ func TestServersAndClientsBasic(t *testing.T) {
require.Nil(t, err)
fmt.Printf("=== testing server on %s using WS client", addr)
testWithWSClient(t, cl3)
cl3.Stop()
err = cl3.Stop()
require.NoError(t, err)
}
}

@@ -317,7 +318,11 @@ func TestWSNewWSRPCFunc(t *testing.T) {
cl.SetLogger(log.TestingLogger())
err = cl.Start()
require.Nil(t, err)
defer cl.Stop()
t.Cleanup(func() {
if err := cl.Stop(); err != nil {
t.Error(err)
}
})

val := testVal
params := map[string]interface{}{
@@ -343,7 +348,11 @@ func TestWSHandlesArrayParams(t *testing.T) {
cl.SetLogger(log.TestingLogger())
err = cl.Start()
require.Nil(t, err)
defer cl.Stop()
t.Cleanup(func() {
if err := cl.Stop(); err != nil {
t.Error(err)
}
})

val := testVal
params := []interface{}{val}
@@ -369,7 +378,11 @@ func TestWSClientPingPong(t *testing.T) {
cl.SetLogger(log.TestingLogger())
err = cl.Start()
require.Nil(t, err)
defer cl.Stop()
t.Cleanup(func() {
if err := cl.Stop(); err != nil {
t.Error(err)
}
})

time.Sleep(6 * time.Second)
}

@@ -141,7 +141,9 @@ func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node {
// StopTendermint stops a test tendermint server, waits until it's stopped and
// cleans up test/config files.
func StopTendermint(node *nm.Node) {
node.Stop()
if err := node.Stop(); err != nil {
node.Logger.Error("Error when trying to stop node", "err", err)
}
node.Wait()
os.RemoveAll(node.Config().RootDir)
}

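`StopTendermint` has no `*testing.T`, so it cannot assert; the error is logged and cleanup continues. The same best-effort treatment would extend to `os.RemoveAll`, which errcheck also flags when left unchecked. A hedged sketch with a hypothetical `stopAndClean` helper (the `nm.Node` methods are the ones used above):

```go
package rpctest

import (
	"os"

	nm "github.com/tendermint/tendermint/node"
)

// stopAndClean logs lifecycle errors instead of asserting, then keeps cleaning.
func stopAndClean(node *nm.Node) {
	if err := node.Stop(); err != nil {
		node.Logger.Error("error when trying to stop node", "err", err)
	}
	node.Wait()
	if err := os.RemoveAll(node.Config().RootDir); err != nil {
		node.Logger.Error("error removing test root dir", "err", err)
	}
}
```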
@@ -187,10 +187,14 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
txResult2.Index,
))

b.Set(depKey, hash2)
b.Set(keyForHeight(txResult2), hash2)
b.Set(hash2, rawBytes)
b.Write()
err = b.Set(depKey, hash2)
require.NoError(t, err)
err = b.Set(keyForHeight(txResult2), hash2)
require.NoError(t, err)
err = b.Set(hash2, rawBytes)
require.NoError(t, err)
err = b.Write()
require.NoError(t, err)

testCases := []struct {
q string

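The four checked writes above are correct but repetitive; since `require.NoError` takes the error directly, the same batch could be written table-driven with every write still checked. A standalone sketch with a hypothetical `fakeBatch` in place of the tm-db batch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// fakeBatch mimics the Set/Write surface of the batch used above.
type fakeBatch struct{ writes int }

func (b *fakeBatch) Set(key, value []byte) error { b.writes++; return nil }
func (b *fakeBatch) Write() error                { return nil }

func TestBatchWritesChecked(t *testing.T) {
	b := &fakeBatch{}
	for _, kv := range []struct{ key, value []byte }{
		{[]byte("depKey"), []byte("hash2")},
		{[]byte("heightKey"), []byte("hash2")},
		{[]byte("hash2"), []byte("rawBytes")},
	} {
		require.NoError(t, b.Set(kv.key, kv.value)) // each write is asserted
	}
	require.NoError(t, b.Write())
	require.Equal(t, 3, b.writes)
}
```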
@@ -63,7 +63,11 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) {
r := NewReactor(conn, nil, "")
err := r.Start()
require.NoError(t, err)
defer r.Stop()
t.Cleanup(func() {
if err := r.Stop(); err != nil {
t.Error(err)
}
})

r.Receive(ChunkChannel, peer, mustEncodeMsg(tc.request))
time.Sleep(100 * time.Millisecond)
@@ -136,7 +140,11 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) {
r := NewReactor(conn, nil, "")
err := r.Start()
require.NoError(t, err)
defer r.Stop()
t.Cleanup(func() {
if err := r.Stop(); err != nil {
t.Error(err)
}
})

r.Receive(SnapshotChannel, peer, mustEncodeMsg(&ssproto.SnapshotsRequest{}))
time.Sleep(100 * time.Millisecond)

@@ -218,12 +218,13 @@ func TestSyncer_SyncAny_abort(t *testing.T) {
syncer, connSnapshot := setupOfferSyncer(t)

s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
syncer.AddSnapshot(simplePeer("id"), s)
_, err := syncer.AddSnapshot(simplePeer("id"), s)
require.NoError(t, err)
connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
Snapshot: toABCI(s), AppHash: []byte("app_hash"),
}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

_, _, err := syncer.SyncAny(0)
_, _, err = syncer.SyncAny(0)
assert.Equal(t, errAbort, err)
connSnapshot.AssertExpectations(t)
}
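One subtlety in this hunk: once `AddSnapshot` declares `err` with `:=`, the later `SyncAny` line must switch from `:=` to `=`. Blank identifiers do not count as new variables, so leaving `_, _, err := ...` in place would fail to compile with "no new variables on left side of :=". A standalone illustration with hypothetical helpers:

```go
package example

import (
	"errors"
	"fmt"
)

func addSnapshot() (bool, error)    { return true, nil }
func syncAny() (int, []byte, error) { return 0, nil, errors.New("abort") }

func Example() {
	_, err := addSnapshot() // err is declared here
	fmt.Println(err)

	// Assign with =: `_, _, err := syncAny()` would not compile, because
	// `_` is not a new variable and err already exists in this scope.
	_, _, err = syncAny()
	fmt.Println(err)
	// Output:
	// <nil>
	// abort
}
```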
@@ -235,9 +236,12 @@ func TestSyncer_SyncAny_reject(t *testing.T) {
s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
syncer.AddSnapshot(simplePeer("id"), s22)
syncer.AddSnapshot(simplePeer("id"), s12)
syncer.AddSnapshot(simplePeer("id"), s11)
_, err := syncer.AddSnapshot(simplePeer("id"), s22)
require.NoError(t, err)
_, err = syncer.AddSnapshot(simplePeer("id"), s12)
require.NoError(t, err)
_, err = syncer.AddSnapshot(simplePeer("id"), s11)
require.NoError(t, err)

connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
@@ -251,7 +255,7 @@
Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

_, _, err := syncer.SyncAny(0)
_, _, err = syncer.SyncAny(0)
assert.Equal(t, errNoSnapshots, err)
connSnapshot.AssertExpectations(t)
}
@@ -263,9 +267,12 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) {
s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
syncer.AddSnapshot(simplePeer("id"), s22)
syncer.AddSnapshot(simplePeer("id"), s12)
syncer.AddSnapshot(simplePeer("id"), s11)
_, err := syncer.AddSnapshot(simplePeer("id"), s22)
require.NoError(t, err)
_, err = syncer.AddSnapshot(simplePeer("id"), s12)
require.NoError(t, err)
_, err = syncer.AddSnapshot(simplePeer("id"), s11)
require.NoError(t, err)

connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
@@ -275,7 +282,7 @@
Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

_, _, err := syncer.SyncAny(0)
_, _, err = syncer.SyncAny(0)
assert.Equal(t, errAbort, err)
connSnapshot.AssertExpectations(t)
}
@@ -323,12 +330,13 @@ func TestSyncer_SyncAny_abciError(t *testing.T) {

errBoom := errors.New("boom")
s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
syncer.AddSnapshot(simplePeer("id"), s)
_, err := syncer.AddSnapshot(simplePeer("id"), s)
require.NoError(t, err)
connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
Snapshot: toABCI(s), AppHash: []byte("app_hash"),
}).Once().Return(nil, errBoom)

_, _, err := syncer.SyncAny(0)
_, _, err = syncer.SyncAny(0)
assert.True(t, errors.Is(err, errBoom))
connSnapshot.AssertExpectations(t)
}
@@ -403,7 +411,8 @@ func TestSyncer_applyChunks_Results(t *testing.T) {

body := []byte{1, 2, 3}
chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "")
chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body})
require.NoError(t, err)
_, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body})
require.NoError(t, err)

connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
@@ -481,7 +490,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) {
// check the queue contents, and finally close the queue to end the goroutine.
// We don't really care about the result of applyChunks, since it has a separate test.
go func() {
syncer.applyChunks(chunks)
syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error
}()

time.Sleep(50 * time.Millisecond)
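Where an error is ignored on purpose, as with the `applyChunks` goroutine above, the linter is silenced with an inline `//nolint:errcheck` directive plus a trailing justification. An explicit `_ =` discard is the other common way to satisfy errcheck; a standalone sketch:

```go
package example

import "errors"

func run() error { return errors.New("ignored by design") }

func spawn() {
	go func() {
		// golangci-lint directive: the comment after the second `//` records why.
		run() //nolint:errcheck // result is exercised by a dedicated test

		// Alternative: explicit discard, which errcheck accepts by default.
		_ = run()
	}()
}
```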
@@ -522,12 +531,18 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) {

s1 := &snapshot{Height: 1, Format: 1, Chunks: 3}
s2 := &snapshot{Height: 2, Format: 1, Chunks: 3}
syncer.AddSnapshot(peerA, s1)
syncer.AddSnapshot(peerA, s2)
syncer.AddSnapshot(peerB, s1)
syncer.AddSnapshot(peerB, s2)
syncer.AddSnapshot(peerC, s1)
syncer.AddSnapshot(peerC, s2)
_, err := syncer.AddSnapshot(peerA, s1)
require.NoError(t, err)
_, err = syncer.AddSnapshot(peerA, s2)
require.NoError(t, err)
_, err = syncer.AddSnapshot(peerB, s1)
require.NoError(t, err)
_, err = syncer.AddSnapshot(peerB, s2)
require.NoError(t, err)
_, err = syncer.AddSnapshot(peerC, s1)
require.NoError(t, err)
_, err = syncer.AddSnapshot(peerC, s2)
require.NoError(t, err)

chunks, err := newChunkQueue(s1, "")
require.NoError(t, err)
@@ -566,7 +581,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
// However, it will block on e.g. retry result, so we spawn a goroutine that will
// be shut down when the chunk queue closes.
go func() {
syncer.applyChunks(chunks)
syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error
}()

time.Sleep(50 * time.Millisecond)
