# lint: add errchecks (#5316)

## Description

Work towards enabling errcheck. Ref: #5059
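Every change below follows the same mechanical pattern: a call whose `error` result was silently dropped either gets wrapped in an `if err := ...; err != nil` check (logged in production code, reported via `t.Error`/`require.NoError` in tests) or is explicitly annotated with `//nolint:errcheck` where ignoring the error is intentional. A minimal sketch of the pattern, using a hypothetical `service` type that is not part of this diff:

```go
package main

import "log"

// service stands in for any component with a Stop() error method, like the
// ABCI clients, reactors, and event buses touched in this diff (hypothetical here).
type service struct{}

func (s *service) Stop() error { return nil }

func main() {
	s := &service{}
	// Before: s.Stop() and errcheck flags the silently dropped error.
	// After: check and log; on a shutdown path there is little else to do.
	if err := s.Stop(); err != nil {
		log.Printf("error stopping service: %v", err)
	}
}
```

On shutdown paths there is usually nothing useful to do with the error besides log it, which is why the `OnStop` methods below log rather than return.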
@@ -99,7 +99,9 @@ func (cli *grpcClient) StopForError(err error) {
 	cli.mtx.Unlock()

 	cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
-	cli.Stop()
+	if err := cli.Stop(); err != nil {
+		cli.Logger.Error("Error stopping abci.grpcClient", "err", err)
+	}
 }

 func (cli *grpcClient) Error() error {

@@ -413,7 +413,9 @@ func (cli *socketClient) LoadSnapshotChunkSync(
 func (cli *socketClient) ApplySnapshotChunkSync(
 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
 	reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
-	cli.FlushSync()
+	if err := cli.FlushSync(); err != nil {
+		return nil, err
+	}
 	return reqres.Response.GetApplySnapshotChunk(), cli.Error()
 }

@@ -500,5 +502,7 @@ func (cli *socketClient) stopForError(err error) {
 	cli.mtx.Unlock()

 	cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
-	cli.Stop()
+	if err := cli.Stop(); err != nil {
+		cli.Logger.Error("Error stopping abci.socketClient", "err", err)
+	}
 }
@@ -59,15 +59,26 @@ func testCounter() {
 	if err := cmd.Start(); err != nil {
 		log.Fatalf("starting %q err: %v", abciApp, err)
 	}
-	defer cmd.Wait()
-	defer cmd.Process.Kill()
+	defer func() {
+		if err := cmd.Wait(); err != nil {
+			log.Printf("error while waiting for cmd to exit: %v", err)
+		}
+
+		if err := cmd.Process.Kill(); err != nil {
+			log.Printf("error on process kill: %v", err)
+		}
+	}()

 	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
 		log.Fatalf("echo failed: %v", err)
 	}

 	client := startClient(abciType)
-	defer client.Stop()
+	defer func() {
+		if err := client.Stop(); err != nil {
+			log.Printf("error trying client stop: %v", err)
+		}
+	}()

 	setOption(client, "serial", "on")
 	commit(client, nil)
@@ -20,7 +20,9 @@ func TestMockReporter(t *testing.T) {
 	}

 	badMessage := bh.BadMessage(peerID, "bad message")
-	pr.Report(badMessage)
+	if err := pr.Report(badMessage); err != nil {
+		t.Error(err)
+	}
 	behaviours = pr.GetBehaviours(peerID)
 	if len(behaviours) != 1 {
 		t.Error("Expected the peer have one reported behaviour")

@@ -164,7 +166,9 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
 			for {
 				select {
 				case pb := <-scriptItems:
-					pr.Report(pb.behaviour)
+					if err := pr.Report(pb.behaviour); err != nil {
+						t.Error(err)
+					}
 				case <-done:
 					return
 				}
@@ -28,7 +28,7 @@ func zipDir(src, dest string) error {
 	dirName := filepath.Base(dest)
 	baseDir := strings.TrimSuffix(dirName, filepath.Ext(dirName))

-	filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
+	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}

@@ -69,7 +69,6 @@ func zipDir(src, dest string) error {
 		return err
 	})

-	return nil
 }

 // copyFile copies a file from src to dest and returns an error upon failure. The
@@ -58,7 +58,9 @@ func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg
 		logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
 	}
 	// recreate the dbDir since the privVal state needs to live there
-	tmos.EnsureDir(dbDir, 0700)
+	if err := tmos.EnsureDir(dbDir, 0700); err != nil {
+		logger.Error("unable to recreate dbDir", "err", err)
+	}
 	resetFilePV(privValKeyFile, privValStateFile, logger)
 }
@@ -120,7 +120,9 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
 			// Stop upon receiving SIGTERM or CTRL-C.
 			tmos.TrapSignal(logger, func() {
 				if n.IsRunning() {
-					n.Stop()
+					if err := n.Stop(); err != nil {
+						logger.Error("unable to stop the node", "error", err)
+					}
 				}
 			})
@@ -84,7 +84,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

 		eventBus := types.NewEventBus()
 		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-		eventBus.Start()
+		err = eventBus.Start()
+		require.NoError(t, err)
 		cs.SetEventBus(eventBus)

 		cs.SetTimeoutTicker(tickerFunc())

@@ -254,9 +255,11 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
 	defer func() {
 		for _, r := range reactors {
 			if rr, ok := r.(*ByzantineReactor); ok {
-				rr.reactor.Switch.Stop()
+				err := rr.reactor.Switch.Stop()
+				require.NoError(t, err)
 			} else {
-				r.(*Reactor).Switch.Stop()
+				err := r.(*Reactor).Switch.Stop()
+				require.NoError(t, err)
 			}
 		}
 	}()
@@ -398,7 +398,10 @@ func newStateWithConfigAndBlockStore(

 	eventBus := types.NewEventBus()
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-	eventBus.Start()
+	err := eventBus.Start()
+	if err != nil {
+		panic(err)
+	}
 	cs.SetEventBus(eventBus)
 	return cs
 }
@@ -83,7 +83,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
 			PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}},
 	}
 	p := precommit.ToProto()
-	cs.privValidator.SignVote(cs.state.ChainID, p)
+	err = cs.privValidator.SignVote(cs.state.ChainID, p)
+	if err != nil {
+		t.Error(err)
+	}
 	precommit.Signature = p.Signature
 	cs.privValidator = nil // disable priv val so we don't do normal votes
 	cs.mtx.Unlock()
@@ -89,11 +89,15 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
 	logger.Info("stopConsensusNet", "n", len(reactors))
 	for i, r := range reactors {
 		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
-		r.Switch.Stop()
+		if err := r.Switch.Stop(); err != nil {
+			logger.Error("error trying to stop switch", "error", err)
+		}
 	}
 	for i, b := range eventBuses {
 		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
-		b.Stop()
+		if err := b.Stop(); err != nil {
+			logger.Error("error trying to stop eventbus", "error", err)
+		}
 	}
 	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
 }

@@ -167,7 +171,8 @@ func TestReactorWithEvidence(t *testing.T) {

 		eventBus := types.NewEventBus()
 		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
-		eventBus.Start()
+		err := eventBus.Start()
+		require.NoError(t, err)
 		cs.SetEventBus(eventBus)

 		cs.SetTimeoutTicker(tickerFunc())
@@ -670,7 +675,8 @@ func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
 			t.Log("")
 		}
 		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
-		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+		require.NoError(t, err)
 		capture()
 		panic("Timed out waiting for all validators to commit a block")
 	}
@@ -107,7 +107,9 @@ func sendTxs(ctx context.Context, cs *State) {
 			return
 		default:
 			tx := []byte{byte(i)}
-			assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
+			if err := assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}); err != nil {
+				panic(err)
+			}
 			i++
 		}
 	}
@@ -196,7 +198,7 @@ LOOP:
 		startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

 		// stop consensus state and transactions sender (initFn)
-		cs.Stop() // Logging this error causes failure
+		cs.Stop() //nolint:errcheck // Logging this error causes failure
 		cancel()

 		// if we reached the required height, exit
@@ -683,7 +685,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
 		wal.SetLogger(log.TestingLogger())
 		err = wal.Start()
 		require.NoError(t, err)
-		defer wal.Stop()
+		t.Cleanup(func() {
+			if err := wal.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
 		chain, commits, err = makeBlockchainFromWAL(wal)
 		require.NoError(t, err)
 		pubKey, err := privVal.GetPubKey()
@@ -730,7 +736,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}

-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	err := handshaker.Handshake(proxyApp)
 	if expectError {
@@ -896,7 +906,11 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	proxyApp := proxy.NewAppConns(clientCreator)
 	err := proxyApp.Start()
 	require.NoError(t, err)
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.Panics(t, func() {
 		h := NewHandshaker(stateDB, state, store, genDoc)
@@ -916,7 +930,11 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	proxyApp := proxy.NewAppConns(clientCreator)
 	err := proxyApp.Start()
 	require.NoError(t, err)
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.Panics(t, func() {
 		h := NewHandshaker(stateDB, state, store, genDoc)
@@ -1211,7 +1229,11 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
 	if err := proxyApp.Start(); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	if err := handshaker.Handshake(proxyApp); err != nil {
 		t.Fatalf("Error on abci handshake: %v", err)
 	}
@@ -405,8 +405,12 @@ func (cs *State) loadWalFile() error {

 // OnStop implements service.Service.
 func (cs *State) OnStop() {
-	cs.evsw.Stop()
-	cs.timeoutTicker.Stop()
+	if err := cs.evsw.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop eventSwitch", "error", err)
+	}
+	if err := cs.timeoutTicker.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop timeoutTicket", "error", err)
+	}
 	// WAL is stopped in receiveRoutine.
 }
@@ -678,7 +682,9 @@ func (cs *State) receiveRoutine(maxSteps int) {
 		// priv_val tracks LastSig

 		// close wal now that we're done writing to it
-		cs.wal.Stop()
+		if err := cs.wal.Stop(); err != nil {
+			cs.Logger.Error("error trying to stop wal", "error", err)
+		}
 		cs.wal.Wait()

 		close(cs.done)
@@ -248,11 +248,15 @@ func TestStateFullRound1(t *testing.T) {

 	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
 	// before consensus can move to the next height (and cause a race condition)
-	cs.eventBus.Stop()
+	if err := cs.eventBus.Stop(); err != nil {
+		t.Error(err)
+	}
 	eventBus := types.NewEventBusWithBufferCapacity(0)
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
 	cs.SetEventBus(eventBus)
-	eventBus.Start()
+	if err := eventBus.Start(); err != nil {
+		t.Error(err)
+	}

 	voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
 	propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
@@ -126,7 +126,9 @@ func (wal *BaseWAL) OnStart() error {
 	if err != nil {
 		return err
 	} else if size == 0 {
-		wal.WriteSync(EndHeightMessage{0})
+		if err := wal.WriteSync(EndHeightMessage{0}); err != nil {
+			return err
+		}
 	}
 	err = wal.group.Start()
 	if err != nil {
@@ -161,8 +163,12 @@ func (wal *BaseWAL) FlushAndSync() error {
 // before cleaning up files.
 func (wal *BaseWAL) OnStop() {
 	wal.flushTicker.Stop()
-	wal.FlushAndSync()
-	wal.group.Stop()
+	if err := wal.FlushAndSync(); err != nil {
+		wal.Logger.Error("error on flush data to disk", "error", err)
+	}
+	if err := wal.group.Stop(); err != nil {
+		wal.Logger.Error("error trying to stop wal", "error", err)
+	}
 	wal.group.Close()
 }
@@ -61,14 +61,22 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
 	if err := proxyApp.Start(); err != nil {
 		return fmt.Errorf("failed to start proxy app connections: %w", err)
 	}
-	defer proxyApp.Stop()
+	t.Cleanup(func() {
+		if err := proxyApp.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	eventBus := types.NewEventBus()
 	eventBus.SetLogger(logger.With("module", "events"))
 	if err := eventBus.Start(); err != nil {
 		return fmt.Errorf("failed to start event bus: %w", err)
 	}
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	mempool := emptyMempool{}
 	evpool := emptyEvidencePool{}
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
@@ -85,7 +93,10 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
 	numBlocksWritten := make(chan struct{})
 	wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
 	// see wal.go#103
-	wal.Write(EndHeightMessage{0})
+	if err := wal.Write(EndHeightMessage{0}); err != nil {
+		t.Error(err)
+	}

 	consensusState.wal = wal

 	if err := consensusState.Start(); err != nil {
@@ -94,10 +105,14 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {

 	select {
 	case <-numBlocksWritten:
-		consensusState.Stop()
+		if err := consensusState.Stop(); err != nil {
+			t.Error(err)
+		}
 		return nil
 	case <-time.After(1 * time.Minute):
-		consensusState.Stop()
+		if err := consensusState.Stop(); err != nil {
+			t.Error(err)
+		}
 		return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
 	}
 }
@@ -46,7 +46,9 @@ func TestWALTruncate(t *testing.T) {
 	err = wal.Start()
 	require.NoError(t, err)
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		// wait for the wal to finish shutting down so we
 		// can safely remove the directory
 		wal.Wait()
@@ -60,7 +62,9 @@ func TestWALTruncate(t *testing.T) {

 	time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run

-	wal.FlushAndSync()
+	if err := wal.FlushAndSync(); err != nil {
+		t.Error(err)
+	}

 	h := int64(50)
 	gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
@@ -115,7 +119,9 @@ func TestWALWrite(t *testing.T) {
 	err = wal.Start()
 	require.NoError(t, err)
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		// wait for the wal to finish shutting down so we
 		// can safely remove the directory
 		wal.Wait()
@@ -191,7 +197,9 @@ func TestWALPeriodicSync(t *testing.T) {

 	require.NoError(t, wal.Start())
 	defer func() {
-		wal.Stop()
+		if err := wal.Stop(); err != nil {
+			t.Error(err)
+		}
 		wal.Wait()
 	}()
@@ -236,7 +244,9 @@ func benchmarkWalDecode(b *testing.B, n int) {
 	enc := NewWALEncoder(buf)

 	data := nBytes(n)
-	enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()})
+	if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil {
+		b.Error(err)
+	}

 	encoded := buf.Bytes()
@@ -6,6 +6,6 @@ import (

 func Sha256(bytes []byte) []byte {
 	hasher := sha256.New()
-	hasher.Write(bytes)
+	hasher.Write(bytes) //nolint:errcheck // ignore error
 	return hasher.Sum(nil)
 }
@@ -80,13 +80,13 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
 	}
 	value := args[0]
 	hasher := tmhash.New()
-	hasher.Write(value) // does not error
+	hasher.Write(value) //nolint: errcheck // does not error
 	vhash := hasher.Sum(nil)

 	bz := new(bytes.Buffer)
 	// Wrap <op.Key, vhash> to hash the KVPair.
-	encodeByteSlice(bz, op.key) // does not error
-	encodeByteSlice(bz, vhash) // does not error
+	encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
+	encodeByteSlice(bz, vhash) //nolint: errcheck // does not error
 	kvhash := leafHash(bz.Bytes())

 	if !bytes.Equal(kvhash, op.Proof.LeafHash) {
@@ -23,10 +23,22 @@ func TestRandom(t *testing.T) {
 		pl := mr.Intn(16384)
 		ad := make([]byte, al)
 		plaintext := make([]byte, pl)
-		cr.Read(key[:])
-		cr.Read(nonce[:])
-		cr.Read(ad)
-		cr.Read(plaintext)
+		_, err := cr.Read(key[:])
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(nonce[:])
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(ad)
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}
+		_, err = cr.Read(plaintext)
+		if err != nil {
+			t.Errorf("error on read: %w", err)
+		}

 		aead, err := New(key[:])
 		if err != nil {
@@ -18,7 +18,9 @@ func BenchmarkReap(b *testing.B) {
 	for i := 0; i < size; i++ {
 		tx := make([]byte, 8)
 		binary.BigEndian.PutUint64(tx, uint64(i))
-		mempool.CheckTx(tx, nil, TxInfo{})
+		if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
+			b.Error(err)
+		}
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {

@@ -35,7 +37,9 @@ func BenchmarkCheckTx(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		tx := make([]byte, 8)
 		binary.BigEndian.PutUint64(tx, uint64(i))
-		mempool.CheckTx(tx, nil, TxInfo{})
+		if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
+			b.Error(err)
+		}
 	}
 }
@@ -20,7 +20,8 @@ func TestCacheRemove(t *testing.T) {
 	for i := 0; i < numTxs; i++ {
 		// probability of collision is 2**-256
 		txBytes := make([]byte, 32)
-		rand.Read(txBytes)
+		_, err := rand.Read(txBytes)
+		require.NoError(t, err)
 		txs[i] = txBytes
 		cache.Push(txBytes)
 		// make sure its added to both the linked list and the map

@@ -67,7 +68,8 @@ func TestCacheAfterUpdate(t *testing.T) {
 				tx := types.Tx{byte(v)}
 				updateTxs = append(updateTxs, tx)
 			}
-			mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
+			err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
+			require.NoError(t, err)

 			for _, v := range tc.reAddIndices {
 				tx := types.Tx{byte(v)}
@@ -170,7 +170,8 @@ func TestMempoolFilters(t *testing.T) {
 		{10, PreCheckMaxBytes(20), PostCheckMaxGas(0), 0},
 	}
 	for tcIndex, tt := range tests {
-		mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
+		err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
+		require.NoError(t, err)
 		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
 		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
 		mempool.Flush()
@@ -185,8 +186,9 @@ func TestMempoolUpdate(t *testing.T) {

 	// 1. Adds valid txs to the cache
 	{
-		mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
-		err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
+		err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		require.NoError(t, err)
+		err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
 		if assert.Error(t, err) {
 			assert.Equal(t, ErrTxInCache, err)
 		}

@@ -196,7 +198,8 @@ func TestMempoolUpdate(t *testing.T) {
 	{
 		err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{})
 		require.NoError(t, err)
-		mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+		require.NoError(t, err)
 		assert.Zero(t, mempool.Size())
 	}

@@ -204,11 +207,12 @@ func TestMempoolUpdate(t *testing.T) {
 	{
 		err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
 		require.NoError(t, err)
-		mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
+		err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
+		require.NoError(t, err)
 		assert.Zero(t, mempool.Size())

 		err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
-		assert.NoError(t, err)
+		require.NoError(t, err)
 	}
 }
@@ -385,7 +389,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
 	defer cleanup()
 	mempool.height = 10
-	mempool.InitWAL()
+	err = mempool.InitWAL()
+	require.NoError(t, err)

 	// 4. Ensure that the directory contains the WAL file
 	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))

@@ -393,7 +398,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	require.Equal(t, 1, len(m2), "expecting the wal match in")

 	// 5. Write some contents to the WAL
-	mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
+	err = mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
+	require.NoError(t, err)
 	walFilepath := mempool.wal.Path
 	sum1 := checksumFile(walFilepath, t)

@@ -403,7 +409,8 @@ func TestMempoolCloseWAL(t *testing.T) {
 	// 7. Invoke CloseWAL() and ensure it discards the
 	// WAL thus any other write won't go through.
 	mempool.CloseWAL()
-	mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
+	err = mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
+	require.NoError(t, err)
 	sum2 := checksumFile(walFilepath, t)
 	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")
@@ -481,7 +488,8 @@ func TestMempoolTxsBytes(t *testing.T) {
 	assert.EqualValues(t, 1, mempool.TxsBytes())

 	// 3. zero again after tx is removed by Update
-	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+	err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+	require.NoError(t, err)
 	assert.EqualValues(t, 0, mempool.TxsBytes())

 	// 4. zero after Flush

@@ -517,7 +525,11 @@ func TestMempoolTxsBytes(t *testing.T) {
 	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
 	err = appConnCon.Start()
 	require.Nil(t, err)
-	defer appConnCon.Stop()
+	t.Cleanup(func() {
+		if err := appConnCon.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Code)

@@ -526,7 +538,8 @@ func TestMempoolTxsBytes(t *testing.T) {
 	require.NotEmpty(t, res2.Data)

 	// Pretend like we committed nothing so txBytes gets rechecked and removed.
-	mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
+	err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
+	require.NoError(t, err)
 	assert.EqualValues(t, 0, mempool.TxsBytes())

 	// 7. Test RemoveTxByKey function
@@ -548,7 +561,11 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
 	app := kvstore.NewApplication()
 	cc, server := newRemoteApp(t, sockPath, app)
-	defer server.Stop()
+	t.Cleanup(func() {
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 	config := cfg.ResetTestRoot("mempool_test")
 	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
 	defer cleanup()

@@ -570,7 +587,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 		tx := txs[txNum]

 		// this will err with ErrTxInCache many times ...
-		mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)})
+		mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
 	}
 	err := mempool.FlushAppConn()
 	require.NoError(t, err)

@@ -597,7 +614,7 @@ func newRemoteApp(
 }
 func checksumIt(data []byte) string {
 	h := sha256.New()
-	h.Write(data)
+	h.Write(data) //nolint: errcheck // ignore errcheck
 	return fmt.Sprintf("%x", h.Sum(nil))
 }
@@ -48,7 +48,9 @@ func TestReactorBroadcastTxMessage(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()
 	for _, r := range reactors {

@@ -69,7 +71,9 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()

@@ -88,7 +92,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()

@@ -112,7 +118,9 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {

 	// stop reactors
 	for _, r := range reactors {
-		r.Stop()
+		if err := r.Stop(); err != nil {
+			assert.NoError(t, err)
+		}
 	}

 	// check that we are not leaking any go-routines

@@ -159,7 +167,9 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
 	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
-			r.Stop()
+			if err := r.Stop(); err != nil {
+				assert.NoError(t, err)
+			}
 		}
 	}()
 	reactor := reactors[0]
node/node.go
@@ -911,11 +911,17 @@ func (n *Node) OnStop() {
 	n.Logger.Info("Stopping Node")

 	// first stop the non-reactor services
-	n.eventBus.Stop()
-	n.indexerService.Stop()
+	if err := n.eventBus.Stop(); err != nil {
+		n.Logger.Error("Error closing eventBus", "err", err)
+	}
+	if err := n.indexerService.Stop(); err != nil {
+		n.Logger.Error("Error closing indexerService", "err", err)
+	}

 	// now stop the reactors
-	n.sw.Stop()
+	if err := n.sw.Stop(); err != nil {
+		n.Logger.Error("Error closing switch", "err", err)
+	}

 	// stop mempool WAL
 	if n.config.Mempool.WalEnabled() {

@@ -937,7 +943,9 @@ func (n *Node) OnStop() {
 	}

 	if pvsc, ok := n.privValidator.(service.Service); ok {
-		pvsc.Stop()
+		if err := pvsc.Stop(); err != nil {
+			n.Logger.Error("Error closing private validator", "err", err)
+		}
 	}

 	if n.prometheusSrv != nil {
@@ -25,14 +25,18 @@ func TestPeerBasic(t *testing.T) {
 	// simulate remote peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
 	rp.Start()
-	defer rp.Stop()
+	t.Cleanup(rp.Stop)

 	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig())
 	require.Nil(err)

 	err = p.Start()
 	require.Nil(err)
-	defer p.Stop()
+	t.Cleanup(func() {
+		if err := p.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.True(p.IsRunning())
 	assert.True(p.IsOutbound())

@@ -51,7 +55,7 @@ func TestPeerSend(t *testing.T) {
 	// simulate remote peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config}
 	rp.Start()
-	defer rp.Stop()
+	t.Cleanup(rp.Stop)

 	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig())
 	require.Nil(err)

@@ -59,7 +63,11 @@ func TestPeerSend(t *testing.T) {
 	err = p.Start()
 	require.Nil(err)

-	defer p.Stop()
+	t.Cleanup(func() {
+		if err := p.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	assert.True(p.CanSend(testCh))
 	assert.True(p.Send(testCh, []byte("Asylum")))
@@ -245,7 +245,9 @@ func (sw *Switch) OnStop() {
 	// Stop reactors
 	sw.Logger.Debug("Switch: Stopping reactors")
 	for _, reactor := range sw.reactors {
-		reactor.Stop()
+		if err := reactor.Stop(); err != nil {
+			sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err)
+		}
 	}
 }

@@ -349,7 +351,9 @@ func (sw *Switch) StopPeerGracefully(peer Peer) {

 func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 	sw.transport.Cleanup(peer)
-	peer.Stop()
+	if err := peer.Stop(); err != nil {
+		sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly
+	}

 	for _, reactor := range sw.reactors {
 		reactor.RemovePeer(peer, reason)
@@ -227,7 +227,8 @@ func TestSwitchPeerFilter(t *testing.T) {
 			SwitchPeerFilters(filters...),
 		)
 	)
-	sw.Start()
+	err := sw.Start()
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {
 			t.Error(err)

@@ -277,7 +278,8 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
 			SwitchPeerFilters(filters...),
 		)
 	)
-	sw.Start()
+	err := sw.Start()
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {
 			t.Log(err)
@@ -384,7 +386,8 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 	require.NotNil(sw.Peers().Get(rp.ID()))

 	// simulate failure by closing connection
-	p.(*peer).CloseConn()
+	err = p.(*peer).CloseConn()
+	require.NoError(err)

 	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
 	assert.False(p.IsRunning())

@@ -396,7 +399,7 @@ func TestSwitchStopPeerForError(t *testing.T) {

 	scrapeMetrics := func() string {
 		resp, err := http.Get(s.URL)
-		assert.NoError(t, err)
+		require.NoError(t, err)
 		defer resp.Body.Close()
 		buf, _ := ioutil.ReadAll(resp.Body)
 		return string(buf)

@@ -467,7 +470,8 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
 	require.NotNil(t, sw.Peers().Get(rp.ID()))

 	p := sw.Peers().List()[0]
-	p.(*peer).CloseConn()
+	err = p.(*peer).CloseConn()
+	require.NoError(t, err)

 	waitUntilSwitchHasAtLeastNPeers(sw, 1)
 	assert.False(t, p.IsRunning()) // old peer instance
@@ -594,8 +598,9 @@ func TestSwitchAcceptRoutine(t *testing.T) {

 	// make switch
 	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
-	err := sw.Start()
+	err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
+	require.NoError(t, err)
+	err = sw.Start()
 	require.NoError(t, err)
 	t.Cleanup(func() {
 		if err := sw.Stop(); err != nil {

@@ -635,7 +640,8 @@ func TestSwitchAcceptRoutine(t *testing.T) {
 	require.NoError(t, err)
 	// check conn is closed
 	one := make([]byte, 1)
-	conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+	err = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+	require.NoError(t, err)
 	_, err = conn.Read(one)
 	assert.Equal(t, io.EOF, err)
 	assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
@@ -689,22 +695,24 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
 	sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
-		sw.Stop()
+		require.NoError(t, err)
+		err = sw.Stop()
+		require.NoError(t, err)
 	})

 	sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
-		sw.Stop()
+		require.NoError(t, err)
+		err = sw.Stop()
+		require.NoError(t, err)
 	})
 	// TODO(melekes) check we remove our address from addrBook

 	sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
 	assert.NotPanics(t, func() {
 		err := sw.Start()
-		assert.NoError(t, err)
-		sw.Stop()
+		require.NoError(t, err)
+		err = sw.Stop()
+		require.NoError(t, err)
 	})
@@ -751,7 +759,11 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
 	})
 	err := sw.Start()
 	require.NoError(t, err)
-	defer sw.Stop()
+	t.Cleanup(func() {
+		if err := sw.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// add peer
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
@@ -72,7 +72,9 @@ func (tms *MetricStore) OnStop() {

 	// Stop all trust metric go-routines
 	for _, tm := range tms.peerMetrics {
-		tm.Stop()
+		if err := tm.Stop(); err != nil {
+			tms.Logger.Error("unable to stop metric store", "error", err)
+		}
 	}

 	// Make the final trust history data save

@@ -108,7 +110,9 @@ func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric {
 	if !ok {
 		// If the metric is not available, we will create it
 		tm = NewMetricWithConfig(tms.config)
-		tm.Start()
+		if err := tm.Start(); err != nil {
+			tms.Logger.Error("unable to start metric store", "error", err)
+		}
 		// The metric needs to be in the map
 		tms.peerMetrics[key] = tm
 	}

@@ -168,7 +172,9 @@ func (tms *MetricStore) loadFromDB() bool {
 	for key, p := range peers {
 		tm := NewMetricWithConfig(tms.config)

-		tm.Start()
+		if err := tm.Start(); err != nil {
+			tms.Logger.Error("unable to start metric", "error", err)
+		}
 		tm.Init(p)
 		// Load the peer trust metric into the store
 		tms.peerMetrics[key] = tm

@@ -193,7 +199,9 @@ func (tms *MetricStore) saveToDB() {
 		tms.Logger.Error("Failed to encode the TrustHistory", "err", err)
 		return
 	}
-	tms.db.SetSync(trustMetricKey, bytes)
+	if err := tms.db.SetSync(trustMetricKey, bytes); err != nil {
+		tms.Logger.Error("failed to flush data to disk", "error", err)
+	}
 }

 // Periodically saves the trust history data to the DB
@@ -73,7 +73,11 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) {

 	err = signerServer.Start()
 	require.NoError(t, err)
-	defer signerServer.Stop()
+	t.Cleanup(func() {
+		if err := signerServer.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	select {
 	case attempts := <-attemptCh:

@@ -104,12 +108,18 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
 	signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV)

 	startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh)
-	defer listenerEndpoint.Stop()
+	t.Cleanup(func() {
+		if err := listenerEndpoint.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	require.NoError(t, signerServer.Start())
 	assert.True(t, signerServer.IsRunning())
 	<-endpointIsOpenCh
-	signerServer.Stop()
+	if err := signerServer.Stop(); err != nil {
+		t.Error(err)
+	}

 	dialerEndpoint2 := NewSignerDialerEndpoint(
 		logger,

@@ -120,7 +130,11 @@ func TestRetryConnToRemoteSigner(t *testing.T) {
 	// let some pings pass
 	require.NoError(t, signerServer2.Start())
 	assert.True(t, signerServer2.IsRunning())
-	defer signerServer2.Stop()
+	t.Cleanup(func() {
+		if err := signerServer2.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// give the client some time to re-establish the conn to the remote signer
 	// should see sth like this in the logs:
@@ -157,16 +157,24 @@ func (app *multiAppConn) killTMOnClientError() {

 func (app *multiAppConn) stopAllClients() {
 	if app.consensusConnClient != nil {
-		app.consensusConnClient.Stop()
+		if err := app.consensusConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping consensus client", "error", err)
+		}
 	}
 	if app.mempoolConnClient != nil {
-		app.mempoolConnClient.Stop()
+		if err := app.mempoolConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping mempool client", "error", err)
+		}
 	}
 	if app.queryConnClient != nil {
-		app.queryConnClient.Stop()
+		if err := app.queryConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping query client", "error", err)
+		}
 	}
 	if app.snapshotConnClient != nil {
-		app.snapshotConnClient.Stop()
+		if err := app.snapshotConnClient.Stop(); err != nil {
+			app.Logger.Error("error while stopping snapshot client", "error", err)
+		}
 	}
 }
@@ -35,7 +35,8 @@ func TestAppConns_Start_Stop(t *testing.T) {

 	time.Sleep(100 * time.Millisecond)

-	appConns.Stop()
+	err = appConns.Stop()
+	require.NoError(t, err)

 	clientMock.AssertExpectations(t)
 }

@@ -71,7 +72,11 @@ func TestAppConns_Failure(t *testing.T) {

 	err := appConns.Start()
 	require.NoError(t, err)
-	defer appConns.Stop()
+	t.Cleanup(func() {
+		if err := appConns.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// simulate failure
 	close(quitCh)
@@ -190,7 +190,8 @@ func TestABCIQuery(t *testing.T) {
 	apph := bres.Height + 1 // this is where the tx will be applied to the state

 	// wait before querying
-	client.WaitForHeight(c, apph, nil)
+	err = client.WaitForHeight(c, apph, nil)
+	require.NoError(t, err)
 	res, err := c.ABCIQuery("/key", k)
 	qres := res.Response
 	if assert.Nil(t, err) && assert.True(t, qres.IsOK()) {

@@ -624,7 +625,8 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) {
 	require.Equal(t, *bresult2, *r2)
 	apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1

-	client.WaitForHeight(c, apph, nil)
+	err = client.WaitForHeight(c, apph, nil)
+	require.NoError(t, err)

 	q1, err := batch.ABCIQuery("/key", k1)
 	require.NoError(t, err)
@@ -349,7 +349,10 @@ func (c *WSClient) reconnectRoutine() {
 			c.wg.Wait()
 			if err := c.reconnect(); err != nil {
 				c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
-				c.Stop()
+				if err = c.Stop(); err != nil {
+					c.Logger.Error("failed to stop conn", "error", err)
+				}
+
 				return
 			}
 			// drain reconnectAfter
@@ -99,7 +99,9 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
 		wm.logger.Error("Failed to start connection", "err", err)
 		return
 	}
-	con.Stop()
+	if err := con.Stop(); err != nil {
+		wm.logger.Error("error while stopping connection", "error", err)
+	}
 }

 ///////////////////////////////////////////////////////////////////////////////
@@ -22,7 +22,11 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 	eventBus.SetLogger(log.TestingLogger())
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// tx indexer
 	store := db.NewMemDB()

@@ -32,27 +36,34 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
 	service.SetLogger(log.TestingLogger())
 	err = service.Start()
 	require.NoError(t, err)
-	defer service.Stop()
+	t.Cleanup(func() {
+		if err := service.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	// publish block with txs
-	eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
+	err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
 		Header: types.Header{Height: 1},
 		NumTxs: int64(2),
 	})
+	require.NoError(t, err)
 	txResult1 := &abci.TxResult{
 		Height: 1,
 		Index:  uint32(0),
 		Tx:     types.Tx("foo"),
 		Result: abci.ResponseDeliverTx{Code: 0},
 	}
-	eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
+	err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
+	require.NoError(t, err)
 	txResult2 := &abci.TxResult{
 		Height: 1,
 		Index:  uint32(1),
 		Tx:     types.Tx("bar"),
 		Result: abci.ResponseDeliverTx{Code: 0},
 	}
-	eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
+	err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
+	require.NoError(t, err)

 	time.Sleep(100 * time.Millisecond)
@@ -252,7 +252,9 @@ func BlockFromProto(bp *tmproto.Block) (*Block, error) {
 		return nil, err
 	}
 	b.Data = data
-	b.Evidence.FromProto(&bp.Evidence)
+	if err := b.Evidence.FromProto(&bp.Evidence); err != nil {
+		return nil, err
+	}

 	if bp.LastCommit != nil {
 		lc, err := CommitFromProto(bp.LastCommit)
@@ -173,8 +173,8 @@ func makeBlockIDRandom() BlockID {
 		blockHash   = make([]byte, tmhash.Size)
 		partSetHash = make([]byte, tmhash.Size)
 	)
-	rand.Read(blockHash)
-	rand.Read(partSetHash)
+	rand.Read(blockHash)   //nolint: errcheck // ignore errcheck for read
+	rand.Read(partSetHash) //nolint: errcheck // ignore errcheck for read
 	return BlockID{blockHash, PartSetHeader{123, partSetHash}}
 }
@@ -59,7 +59,9 @@ func (b *EventBus) OnStart() error {
 }

 func (b *EventBus) OnStop() {
-	b.pubsub.Stop()
+	if err := b.pubsub.Stop(); err != nil {
+		b.pubsub.Logger.Error("error trying to stop eventBus", "error", err)
+	}
 }

 func (b *EventBus) NumClients() int {
@@ -20,7 +20,11 @@ func TestEventBusPublishEventTx(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	tx := Tx("foo")
 	result := abci.ResponseDeliverTx{

@@ -65,7 +69,11 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
 	resultBeginBlock := abci.ResponseBeginBlock{

@@ -112,7 +120,11 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	tx := Tx("foo")
 	result := abci.ResponseDeliverTx{

@@ -216,7 +228,11 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	block := MakeBlock(0, []Tx{}, nil, []Evidence{})
 	resultBeginBlock := abci.ResponseBeginBlock{

@@ -263,7 +279,11 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id")

@@ -297,7 +317,11 @@ func TestEventBusPublish(t *testing.T) {
 	eventBus := NewEventBus()
 	err := eventBus.Start()
 	require.NoError(t, err)
-	defer eventBus.Stop()
+	t.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	const numEventsExpected = 14
@@ -389,8 +413,15 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
 	rand.Seed(time.Now().Unix())

 	eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache
-	eventBus.Start()
-	defer eventBus.Stop()
+	err := eventBus.Start()
+	if err != nil {
+		b.Error(err)
+	}
+	b.Cleanup(func() {
+		if err := eventBus.Stop(); err != nil {
+			b.Error(err)
+		}
+	})

 	ctx := context.Background()
 	q := EventQueryNewBlock

@@ -423,7 +454,10 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes
 			eventType = randEvent()
 		}

-		eventBus.Publish(eventType, EventDataString("Gamora"))
+		err := eventBus.Publish(eventType, EventDataString("Gamora"))
+		if err != nil {
+			b.Error(err)
+		}
 	}
 }
@@ -129,7 +129,8 @@ func TestGenesisSaveAs(t *testing.T) {
 	genDoc := randomGenesisDoc()

 	// save
-	genDoc.SaveAs(tmpfile.Name())
+	err = genDoc.SaveAs(tmpfile.Name())
+	require.NoError(t, err)
 	stat, err := tmpfile.Stat()
 	require.NoError(t, err)
 	if err != nil && stat.Size() <= 0 {
@@ -150,7 +150,10 @@ func HashConsensusParams(params tmproto.ConsensusParams) []byte {
 		panic(err)
 	}

-	hasher.Write(bz)
+	_, err = hasher.Write(bz)
+	if err != nil {
+		panic(err)
+	}
 	return hasher.Sum(nil)
 }
@@ -307,7 +307,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// start tracking blockHash1
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val0 votes again for blockHash1.
 	{

@@ -318,7 +319,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// attempt tracking blockHash2, should fail because already set for peerA.
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	require.Error(t, err)

 	// val0 votes again for blockHash1.
 	{

@@ -369,7 +371,8 @@ func TestVoteSet_Conflicts(t *testing.T) {
 	}

 	// now attempt tracking blockHash1
-	voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val2 votes for blockHash1.
 	{
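Many of the test changes above also swap `defer x.Stop()` for `t.Cleanup(...)`: the cleanup still runs when the test ends (including on failure), and the closure gives a natural place to surface the previously dropped error. A minimal sketch, with a hypothetical `noopService` standing in for the proxy-app and event-bus values used in the real tests:

```go
package example

import "testing"

// noopService stands in for components like the proxy app connections or
// event bus in the tests above (hypothetical).
type noopService struct{}

func (noopService) Stop() error { return nil }

func TestWithCleanup(t *testing.T) {
	svc := noopService{}
	// Before: defer svc.Stop() (errcheck flags the dropped error).
	// After: t.Cleanup runs after the test finishes, and the closure
	// reports the Stop error through the testing.T.
	t.Cleanup(func() {
		if err := svc.Stop(); err != nil {
			t.Error(err)
		}
	})
}
```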