lint: enable errcheck (#5336)

## Description

Enable the errcheck linter throughout the codebase.

Closes: #5059
Author: Marko
Date: 2020-09-07 17:03:18 +02:00 (committed by GitHub)
Parent: 3359e0bf2f
Commit: 0ed8dba991
39 changed files with 346 additions and 137 deletions
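
For context, errcheck flags any function call whose returned `error` is silently dropped. Enabling it is roughly a one-line change in the lint configuration; the fragment below is a minimal sketch assuming a standard golangci-lint `.golangci.yml`, not the repository's actual file:

```yaml
# Illustrative .golangci.yml fragment (not the repository's actual config,
# which enables other linters as well).
linters:
  enable:
    - errcheck
```

With errcheck on, every previously ignored `error` return has to be either handled or explicitly discarded, which is the pattern applied throughout the hunks below.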


@@ -214,7 +214,9 @@ func (pool *BlockPool) PopRequest() {
 			PanicSanity("PopRequest() requires a valid block")
 		}
 		*/
-		r.Stop()
+		if err := r.Stop(); err != nil {
+			pool.Logger.Error("Error stopping requester", "err", err)
+		}
 		delete(pool.requesters, pool.height)
 		pool.height++
 	} else {
@@ -615,7 +617,9 @@ OUTER_LOOP:
 		for {
 			select {
 			case <-bpr.pool.Quit():
-				bpr.Stop()
+				if err := bpr.Stop(); err != nil {
+					bpr.Logger.Error("Error stopped requester", "err", err)
+				}
 				return
 			case <-bpr.Quit():
 				return


@@ -129,7 +129,9 @@ func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
 // OnStop implements service.Service.
 func (bcR *BlockchainReactor) OnStop() {
 	if bcR.fastSync {
-		bcR.pool.Stop()
+		if err := bcR.pool.Stop(); err != nil {
+			bcR.Logger.Error("Error stopping pool", "err", err)
+		}
 	}
 }
@@ -313,7 +315,9 @@ FOR_LOOP:
 				"outbound", outbound, "inbound", inbound)
 			if bcR.pool.IsCaughtUp() {
 				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
-				bcR.pool.Stop()
+				if err := bcR.pool.Stop(); err != nil {
+					bcR.Logger.Error("Error stopping pool", "err", err)
+				}
 				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 				if ok {
 					conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)


@@ -318,7 +318,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
 		case <-doProcessBlockCh:
 			r.processor.send(rProcessBlock{})
 		case <-doStatusCh:
-			r.io.broadcastStatusRequest()
+			if err := r.io.broadcastStatusRequest(); err != nil {
+				r.logger.Error("Error broadcasting status request", "err", err)
+			}
 		// Events from peers. Closing the channel signals event loop termination.
 		case event, ok := <-events:
@@ -343,9 +345,13 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
 				r.processor.send(event)
 			case scPeerError:
 				r.processor.send(event)
-				r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError"))
+				if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
+					r.logger.Error("Error reporting peer", "err", err)
+				}
			case scBlockRequest:
-				r.io.sendBlockRequest(event.peerID, event.height)
+				if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
+					r.logger.Error("Error sending block request", "err", err)
+				}
			case scFinishedEv:
				r.processor.send(event)
				r.scheduler.stop()


@@ -288,7 +288,9 @@ func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error
 	}
 	if height < peer.height {
-		sc.removePeer(peerID)
+		if err := sc.removePeer(peerID); err != nil {
+			return err
+		}
 		return fmt.Errorf("cannot move peer height lower. from %d to %d", peer.height, height)
 	}
@@ -611,7 +613,9 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
 		// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
 		// will be timed out even if it sends blocks at higher heights but prevents progress by
 		// not sending the block at current height.
-		sc.removePeer(sc.pendingBlocks[sc.height])
+		if err := sc.removePeer(sc.pendingBlocks[sc.height]); err != nil {
+			return nil, err
+		}
 	}
 	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
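
Not every offending call ends up with a logging or return branch like the ones above; when a returned error is genuinely not actionable, errcheck can also be satisfied by assigning it to the blank identifier. A small self-contained illustration of both forms, using hypothetical calls rather than code from this diff:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Handled return: the error is checked, so errcheck is satisfied.
	if err := os.Setenv("ERRCHECK_DEMO", "1"); err != nil {
		fmt.Println("setenv failed:", err)
	}

	// Deliberately ignored return: assigning to the blank identifier also
	// satisfies errcheck under its default settings (check-blank: false).
	_ = os.Unsetenv("ERRCHECK_DEMO")
}
```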