mirror of https://github.com/tendermint/tendermint.git
lint: enable errcheck (#5336)
## Description

Enable errcheck linter throughout the codebase.

Closes: #5059
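Nearly every hunk below applies the same recurring change: a return value that was previously dropped is now either handled or discarded on purpose. As a rough sketch of the three variants this commit uses (the `doWork` helper and the log messages are hypothetical stand-ins, not code from the repository):

```go
package main

import "log"

// doWork stands in for any call whose error used to be silently ignored.
func doWork() error { return nil }

func main() {
	// Variant 1: check the error and log it (or return it up the stack).
	if err := doWork(); err != nil {
		log.Printf("error doing work: %v", err)
	}

	// Variant 2: discard the error explicitly; by default errcheck does not
	// flag assignments to the blank identifier.
	_ = doWork()

	// Variant 3: keep the call as-is and silence the linter for this line.
	doWork() //nolint:errcheck // ignore error
}
```

Most hunks take the first variant, logging through the component's logger or returning the error; the `_ =` and `//nolint:errcheck` forms are reserved for calls that are documented never to fail or whose failure does not matter.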
@@ -5,7 +5,7 @@ linters:
   - depguard
   - dogsled
   - dupl
-  # - errcheck
+  - errcheck
   # - funlen
   # - gochecknoglobals
   # - gochecknoinits
@@ -39,8 +39,6 @@ linters:
   # - wsl
   # - gocognit
   - nolintlint
-  disable:
-    - errcheck
 
 issues:
   exclude-rules:
@@ -36,6 +36,7 @@ func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Se
 
 // OnStart starts the gRPC service.
 func (s *GRPCServer) OnStart() error {
+
 	ln, err := net.Listen(s.proto, s.addr)
 	if err != nil {
 		return err
@@ -46,8 +47,11 @@ func (s *GRPCServer) OnStart() error {
 	types.RegisterABCIApplicationServer(s.server, s.app)
 
 	s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
-	go s.server.Serve(s.listener)
+	go func() {
+		if err := s.server.Serve(s.listener); err != nil {
+			s.Logger.Error("Error serving gRPC server", "err", err)
+		}
+	}()
 	return nil
 }
@@ -60,13 +60,12 @@ func testCounter() {
 		log.Fatalf("starting %q err: %v", abciApp, err)
 	}
 	defer func() {
-		if err := cmd.Wait(); err != nil {
-			log.Printf("error while waiting for cmd to exit: %v", err)
-		}
-
 		if err := cmd.Process.Kill(); err != nil {
 			log.Printf("error on process kill: %v", err)
 		}
+		if err := cmd.Wait(); err != nil {
+			log.Printf("error while waiting for cmd to exit: %v", err)
+		}
 	}()
 
 	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
@@ -214,7 +214,9 @@ func (pool *BlockPool) PopRequest() {
 			PanicSanity("PopRequest() requires a valid block")
 		}
 		*/
-		r.Stop()
+		if err := r.Stop(); err != nil {
+			pool.Logger.Error("Error stopping requester", "err", err)
+		}
 		delete(pool.requesters, pool.height)
 		pool.height++
 	} else {
@@ -615,7 +617,9 @@ OUTER_LOOP:
 	for {
 		select {
 		case <-bpr.pool.Quit():
-			bpr.Stop()
+			if err := bpr.Stop(); err != nil {
+				bpr.Logger.Error("Error stopped requester", "err", err)
+			}
 			return
 		case <-bpr.Quit():
 			return
@@ -129,7 +129,9 @@ func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
 // OnStop implements service.Service.
 func (bcR *BlockchainReactor) OnStop() {
 	if bcR.fastSync {
-		bcR.pool.Stop()
+		if err := bcR.pool.Stop(); err != nil {
+			bcR.Logger.Error("Error stopping pool", "err", err)
+		}
 	}
 }
@@ -313,7 +315,9 @@ FOR_LOOP:
 				"outbound", outbound, "inbound", inbound)
 			if bcR.pool.IsCaughtUp() {
 				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
-				bcR.pool.Stop()
+				if err := bcR.pool.Stop(); err != nil {
+					bcR.Logger.Error("Error stopping pool", "err", err)
+				}
 				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 				if ok {
 					conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
@@ -318,7 +318,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
 		case <-doProcessBlockCh:
 			r.processor.send(rProcessBlock{})
 		case <-doStatusCh:
-			r.io.broadcastStatusRequest()
+			if err := r.io.broadcastStatusRequest(); err != nil {
+				r.logger.Error("Error broadcasting status request", "err", err)
+			}
 
 		// Events from peers. Closing the channel signals event loop termination.
 		case event, ok := <-events:
@@ -343,9 +345,13 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
 				r.processor.send(event)
 			case scPeerError:
 				r.processor.send(event)
-				r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError"))
+				if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
+					r.logger.Error("Error reporting peer", "err", err)
+				}
 			case scBlockRequest:
-				r.io.sendBlockRequest(event.peerID, event.height)
+				if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
+					r.logger.Error("Error sending block request", "err", err)
+				}
 			case scFinishedEv:
 				r.processor.send(event)
 				r.scheduler.stop()
@@ -288,7 +288,9 @@ func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error
 	}
 
 	if height < peer.height {
-		sc.removePeer(peerID)
+		if err := sc.removePeer(peerID); err != nil {
+			return err
+		}
 		return fmt.Errorf("cannot move peer height lower. from %d to %d", peer.height, height)
 	}
@@ -611,7 +613,9 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
 		// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
 		// will be timed out even if it sends blocks at higher heights but prevents progress by
 		// not sending the block at current height.
-		sc.removePeer(sc.pendingBlocks[sc.height])
+		if err := sc.removePeer(sc.pendingBlocks[sc.height]); err != nil {
+			return nil, err
+		}
 	}
 
 	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
@@ -136,7 +136,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 			return err
 		}
 
-		initFilesWithConfig(config)
+		if err := initFilesWithConfig(config); err != nil {
+			return err
+		}
 
 		pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
 		pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)
@@ -170,7 +172,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 			return err
 		}
 
-		initFilesWithConfig(config)
+		if err := initFilesWithConfig(config); err != nil {
+			return err
+		}
 	}
 
 	// Generate genesis doc from generated validators
@@ -93,7 +93,9 @@ func (conR *Reactor) OnStart() error {
 // state.
 func (conR *Reactor) OnStop() {
 	conR.unsubscribeFromBroadcastEvents()
-	conR.conS.Stop()
+	if err := conR.conS.Stop(); err != nil {
+		conR.Logger.Error("Error stopping consensus state", "err", err)
+	}
 	if !conR.WaitSync() {
 		conR.conS.Wait()
 	}
@@ -395,20 +397,26 @@ func (conR *Reactor) WaitSync() bool {
 // them to peers upon receiving.
 func (conR *Reactor) subscribeToBroadcastEvents() {
 	const subscriber = "consensus-reactor"
-	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
+	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
 		func(data tmevents.EventData) {
 			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
-		})
+		}); err != nil {
+		conR.Logger.Error("Error adding listener for events", "err", err)
+	}
 
-	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
+	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
 		func(data tmevents.EventData) {
 			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
-		})
+		}); err != nil {
+		conR.Logger.Error("Error adding listener for events", "err", err)
+	}
 
-	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
+	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
 		func(data tmevents.EventData) {
 			conR.broadcastHasVoteMessage(data.(*types.Vote))
-		})
+		}); err != nil {
+		conR.Logger.Error("Error adding listener for events", "err", err)
+	}
 
 }
@@ -57,7 +57,11 @@ func (cs *State) ReplayFile(file string, console bool) error {
 	if err != nil {
 		return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
 	}
-	defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
+	defer func() {
+		if err := cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil {
+			cs.Logger.Error("Error unsubscribing to event bus", "err", err)
+		}
+	}()
 
 	// just open the file for reading, no need to use wal
 	fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
@@ -120,7 +124,9 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl
 
 // go back count steps by resetting the state and running (pb.count - count) steps
 func (pb *playback) replayReset(count int, newStepSub types.Subscription) error {
-	pb.cs.Stop()
+	if err := pb.cs.Stop(); err != nil {
+		return err
+	}
 	pb.cs.Wait()
 
 	newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
@@ -218,7 +224,11 @@ func (pb *playback) replayConsoleLoop() int {
 			if err != nil {
 				tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
 			}
-			defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
+			defer func() {
+				if err := pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil {
+					pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err)
+				}
+			}()
 
 			if len(tokens) == 1 {
 				if err := pb.replayReset(1, newStepSub); err != nil {
@@ -83,7 +83,11 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
 
 	err := cs.Start()
 	require.NoError(t, err)
-	defer cs.Stop()
+	defer func() {
+		if err := cs.Stop(); err != nil {
+			t.Error(err)
+		}
+	}()
 
 	// This is just a signal that we haven't halted; its not something contained
 	// in the WAL itself. Assuming the consensus state is running, replay of any
@@ -329,7 +329,9 @@ func (cs *State) OnStart() error {
 		cs.Logger.Info("WAL file is corrupted. Attempting repair", "err", err)
 
 		// 1) prep work
-		cs.wal.Stop()
+		if err := cs.wal.Stop(); err != nil {
+			return err
+		}
 		repairAttempted = true
 
 		// 2) backup original WAL file
@@ -658,11 +660,15 @@ func (cs *State) updateToState(state sm.State) {
 
 func (cs *State) newStep() {
 	rs := cs.RoundStateEvent()
-	cs.wal.Write(rs)
+	if err := cs.wal.Write(rs); err != nil {
+		cs.Logger.Error("Error writing to wal", "err", err)
+	}
 	cs.nSteps++
 	// newStep is called by updateToState in NewState before the eventBus is set!
 	if cs.eventBus != nil {
-		cs.eventBus.PublishEventNewRoundStep(rs)
+		if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil {
+			cs.Logger.Error("Error publishing new round step", "err", err)
+		}
 		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
 	}
 }
@@ -720,7 +726,9 @@ func (cs *State) receiveRoutine(maxSteps int) {
 		case <-cs.txNotifier.TxsAvailable():
 			cs.handleTxsAvailable()
 		case mi = <-cs.peerMsgQueue:
-			cs.wal.Write(mi)
+			if err := cs.wal.Write(mi); err != nil {
+				cs.Logger.Error("Error writing to wal", "err", err)
+			}
 			// handles proposals, block parts, votes
 			// may generate internal events (votes, complete proposals, 2/3 majorities)
 			cs.handleMsg(mi)
@@ -741,7 +749,9 @@ func (cs *State) receiveRoutine(maxSteps int) {
 			// handles proposals, block parts, votes
 			cs.handleMsg(mi)
 		case ti := <-cs.timeoutTicker.Chan(): // tockChan:
-			cs.wal.Write(ti)
+			if err := cs.wal.Write(ti); err != nil {
+				cs.Logger.Error("Error writing to wal", "err", err)
+			}
 			// if the timeout is relevant to the rs
 			// go to the next step
 			cs.handleTimeout(ti, rs)
@@ -839,13 +849,19 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
 	case cstypes.RoundStepNewRound:
 		cs.enterPropose(ti.Height, 0)
 	case cstypes.RoundStepPropose:
-		cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing timeout propose", "err", err)
+		}
 		cs.enterPrevote(ti.Height, ti.Round)
 	case cstypes.RoundStepPrevoteWait:
-		cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing timeout wait", "err", err)
+		}
 		cs.enterPrecommit(ti.Height, ti.Round)
 	case cstypes.RoundStepPrecommitWait:
-		cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing timeout wait", "err", err)
+		}
 		cs.enterPrecommit(ti.Height, ti.Round)
 		cs.enterNewRound(ti.Height, ti.Round+1)
 	default:
@@ -933,7 +949,9 @@ func (cs *State) enterNewRound(height int64, round int32) {
 	cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping
 	cs.TriggeredTimeoutPrecommit = false
 
-	cs.eventBus.PublishEventNewRound(cs.NewRoundEvent())
+	if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil {
+		cs.Logger.Error("Error publishing new round", "err", err)
+	}
 	cs.metrics.Rounds.Set(float64(round))
 
 	// Wait for txs to be available in the mempool
@@ -1058,7 +1076,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) {
 
 	// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
 	// and the privValidator will refuse to sign anything.
-	cs.wal.FlushAndSync()
+	if err := cs.wal.FlushAndSync(); err != nil {
+		cs.Logger.Error("Error flushing to disk")
+	}
 
 	// Make proposal
 	propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
@@ -1269,7 +1289,9 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 	}
 
 	// At this point +2/3 prevoted for a particular block or nil.
-	cs.eventBus.PublishEventPolka(cs.RoundStateEvent())
+	if err := cs.eventBus.PublishEventPolka(cs.RoundStateEvent()); err != nil {
+		cs.Logger.Error("Error publishing polka", "err", err)
+	}
 
 	// the latest POLRound should be this round.
 	polRound, _ := cs.Votes.POLInfo()
@@ -1286,7 +1308,9 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 			cs.LockedRound = -1
 			cs.LockedBlock = nil
 			cs.LockedBlockParts = nil
-			cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+			if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
+				cs.Logger.Error("Error publishing event unlock", "err", err)
+			}
 		}
 		cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
 		return
@@ -1298,7 +1322,9 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 	if cs.LockedBlock.HashesTo(blockID.Hash) {
 		logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
 		cs.LockedRound = round
-		cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing event relock", "err", err)
+		}
 		cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
 		return
 	}
@@ -1313,7 +1339,9 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 		cs.LockedRound = round
 		cs.LockedBlock = cs.ProposalBlock
 		cs.LockedBlockParts = cs.ProposalBlockParts
-		cs.eventBus.PublishEventLock(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing event lock", "err", err)
+		}
 		cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
 		return
 	}
@@ -1329,7 +1357,9 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 		cs.ProposalBlock = nil
 		cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
 	}
-	cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+	if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
+		cs.Logger.Error("Error publishing event unlock", "err", err)
+	}
 	cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
 }
@@ -1415,7 +1445,9 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
 		// Set up ProposalBlockParts and keep waiting.
 		cs.ProposalBlock = nil
 		cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
-		cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
+		if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
+			cs.Logger.Error("Error publishing valid block", "err", err)
+		}
 		cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
 	}
 	// else {
@@ -1756,7 +1788,9 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
 		cs.ProposalBlock = block
 		// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
 		cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
-		cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent())
+		if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
+			cs.Logger.Error("Error publishing event complete proposal", "err", err)
+		}
 
 		// Update Valid* if we can.
 		prevotes := cs.Votes.Prevotes(cs.Round)
@@ -1872,7 +1906,9 @@ func (cs *State) addVote(
 		}
 
 		cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
-		cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
+		if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
+			return added, err
+		}
 		cs.evsw.FireEvent(types.EventVote, vote)
 
 		// if we can skip timeoutCommit and have all the votes now,
@@ -1899,7 +1935,9 @@ func (cs *State) addVote(
 		return
 	}
 
-	cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
+	if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
+		return added, err
+	}
 	cs.evsw.FireEvent(types.EventVote, vote)
 
 	switch vote.Type {
@@ -1925,7 +1963,9 @@ func (cs *State) addVote(
 			cs.LockedRound = -1
 			cs.LockedBlock = nil
 			cs.LockedBlockParts = nil
-			cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+			if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
+				return added, err
+			}
 		}
 
 		// Update Valid* if we can.
@@ -1949,7 +1989,9 @@ func (cs *State) addVote(
 				cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
 			}
 			cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
-			cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
+			if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
+				return added, err
+			}
 		}
 	}
@@ -2009,7 +2051,9 @@ func (cs *State) signVote(
 ) (*types.Vote, error) {
 	// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
 	// and the privValidator will refuse to sign anything.
-	cs.wal.FlushAndSync()
+	if err := cs.wal.FlushAndSync(); err != nil {
+		return nil, err
+	}
 
 	if cs.privValidatorPubKey == nil {
 		return nil, errPubKeyIsNotSet
@@ -4,8 +4,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/gogo/protobuf/proto"
-
 	clist "github.com/tendermint/tendermint/libs/clist"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
@@ -229,14 +227,16 @@ func encodeMsg(evis []types.Evidence) ([]byte, error) {
 		Evidence: evi,
 	}
 
-	return proto.Marshal(&epl)
+	return epl.Marshal()
 }
 
 // decodemsg takes an array of bytes
 // returns an array of evidence
 func decodeMsg(bz []byte) (evis []types.Evidence, err error) {
 	lm := ep.List{}
-	proto.Unmarshal(bz, &lm)
+	if err := lm.Unmarshal(bz); err != nil {
+		return nil, err
+	}
 
 	evis = make([]types.Evidence, len(lm.Evidence))
 	for i := 0; i < len(lm.Evidence); i++ {
@@ -79,7 +79,7 @@ func OpenAutoFile(path string) (*AutoFile, error) {
 	signal.Notify(af.hupc, syscall.SIGHUP)
 	go func() {
 		for range af.hupc {
-			af.closeFile()
+			_ = af.closeFile()
 		}
 	}()
@@ -103,7 +103,7 @@ func (af *AutoFile) closeFileRoutine() {
 	for {
 		select {
 		case <-af.closeTicker.C:
-			af.closeFile()
+			_ = af.closeFile()
 		case <-af.closeTickerStopc:
 			return
 		}
@@ -17,7 +17,11 @@ import (
 func TestSIGHUP(t *testing.T) {
 	origDir, err := os.Getwd()
 	require.NoError(t, err)
-	defer os.Chdir(origDir)
+	t.Cleanup(func() {
+		if err := os.Chdir(origDir); err != nil {
+			t.Error(err)
+		}
+	})
 
 	// First, create a temporary directory and move into it
 	dir, err := ioutil.TempDir("", "sighup_test")
@@ -23,7 +23,10 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo
 	flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this")
 	flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.")
 	flagSet.BoolVar(&version, "version", false, "Version")
-	flagSet.Parse(os.Args[1:])
+	if err := flagSet.Parse(os.Args[1:]); err != nil {
+		fmt.Printf("err parsing flag: %v\n", err)
+		os.Exit(1)
+	}
 	chopSize = parseBytesize(chopSizeStr)
 	limitSize = parseBytesize(limitSizeStr)
 	return
@@ -145,7 +145,9 @@ func (g *Group) OnStart() error {
 // NOTE: g.Head must be closed separately using Close.
 func (g *Group) OnStop() {
 	g.ticker.Stop()
-	g.FlushAndSync()
+	if err := g.FlushAndSync(); err != nil {
+		g.Logger.Error("Error flushin to disk", "err", err)
+	}
 }
 
 // Wait blocks until all internal goroutines are finished. Supposed to be
@@ -157,7 +159,9 @@ func (g *Group) Wait() {
 
 // Close closes the head file. The group must be stopped by this moment.
 func (g *Group) Close() {
-	g.FlushAndSync()
+	if err := g.FlushAndSync(); err != nil {
+		g.Logger.Error("Error flushin to disk", "err", err)
+	}
 
 	g.mtx.Lock()
 	_ = g.Head.closeFile()
@@ -69,7 +69,7 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s
 	go func() {
 		var buf bytes.Buffer
 		// io.Copy will end when we call reader.Close() below
-		io.Copy(&buf, reader)
+		io.Copy(&buf, reader) //nolint:errcheck //ignore error
 		stdC <- buf.String()
 	}()
 	return &stdC
@@ -77,6 +77,7 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log
 	}
 }
 
+//nolint: errcheck // ignore errors
 var (
 	baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") }
 	withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") }
@@ -74,16 +74,18 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) {
 	// Defer here, in case there is a panic in WriteFileAtomic.
 	defer os.Remove(fileToWrite)
 
-	require.Nil(t, err)
-	f.WriteString(testString)
-	WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	require.NoError(t, err)
+	_, err = f.WriteString(testString)
+	require.NoError(t, err)
+	err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	require.NoError(t, err)
 	// Check that the first atomic file was untouched
 	firstAtomicFileBytes, err := ioutil.ReadFile(fname)
-	require.Nil(t, err, "Error reading first atomic file")
+	require.NoError(t, err, "Error reading first atomic file")
 	require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten")
 	// Check that the resultant file is correct
 	resultantFileBytes, err := ioutil.ReadFile(fileToWrite)
-	require.Nil(t, err, "Error reading resultant file")
+	require.NoError(t, err, "Error reading resultant file")
 	require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes")
 
 	// Check that the intermediate write file was deleted
@@ -113,7 +115,8 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) {
 		fname := "/tmp/" + atomicWriteFilePrefix + fileRand
 		f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777)
 		require.Nil(t, err)
-		f.WriteString(fmt.Sprintf(testString, i))
+		_, err = f.WriteString(fmt.Sprintf(testString, i))
+		require.NoError(t, err)
 		defer os.Remove(fname)
 	}
@@ -121,7 +124,8 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) {
 	// Defer here, in case there is a panic in WriteFileAtomic.
 	defer os.Remove(fileToWrite)
 
-	WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	require.NoError(t, err)
 	// Check that all intermittent atomic file were untouched
 	atomicWriteFileRand = defaultSeed
 	for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ {
@@ -66,7 +66,9 @@ func ExampleClient_Update() {
 		stdlog.Fatal(err)
 	}
 	defer func() {
-		c.Cleanup()
+		if err := c.Cleanup(); err != nil {
+			stdlog.Fatal(err)
+		}
 	}()
 
 	time.Sleep(2 * time.Second)
@@ -135,7 +137,9 @@ func ExampleClient_VerifyLightBlockAtHeight() {
 		stdlog.Fatal(err)
 	}
 	defer func() {
-		c.Cleanup()
+		if err := c.Cleanup(); err != nil {
+			stdlog.Fatal(err)
+		}
 	}()
 
 	_, err = c.VerifyLightBlockAtHeight(3, time.Now())
@@ -55,7 +55,9 @@ func (c *Client) OnStart() error {
 
 func (c *Client) OnStop() {
 	if c.next.IsRunning() {
-		c.next.Stop()
+		if err := c.next.Stop(); err != nil {
+			c.Logger.Error("Error stopping on next", "err", err)
+		}
 	}
 }
node/node.go
@@ -1046,21 +1046,29 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 			rootHandler = corsMiddleware.Handler(mux)
 		}
 		if n.config.RPC.IsTLSEnabled() {
-			go rpcserver.ServeTLS(
-				listener,
-				rootHandler,
-				n.config.RPC.CertFile(),
-				n.config.RPC.KeyFile(),
-				rpcLogger,
-				config,
-			)
+			go func() {
+				if err := rpcserver.ServeTLS(
+					listener,
+					rootHandler,
+					n.config.RPC.CertFile(),
+					n.config.RPC.KeyFile(),
+					rpcLogger,
+					config,
+				); err != nil {
+					n.Logger.Error("Error serving server with TLS", "err", err)
+				}
+			}()
 		} else {
-			go rpcserver.Serve(
-				listener,
-				rootHandler,
-				rpcLogger,
-				config,
-			)
+			go func() {
+				if err := rpcserver.Serve(
+					listener,
+					rootHandler,
+					rpcLogger,
+					config,
+				); err != nil {
+					n.Logger.Error("Error serving server", "err", err)
+				}
+			}()
 		}
 
 		listeners[i] = listener
@@ -1084,11 +1092,17 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 		if err != nil {
 			return nil, err
 		}
-		go grpccore.StartGRPCServer(listener)
+		go func() {
+			if err := grpccore.StartGRPCServer(listener); err != nil {
+				n.Logger.Error("Error starting gRPC server", "err", err)
+			}
+		}()
 		listeners = append(listeners, listener)
 	}
 
 	return listeners, nil
 }
 
 // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
@@ -336,7 +336,9 @@ func (c *MConnection) _recover() {
 }
 
 func (c *MConnection) stopForError(r interface{}) {
-	c.Stop()
+	if err := c.Stop(); err != nil {
+		c.Logger.Error("Error stopping connection", "err", err)
+	}
 	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
 		if c.onError != nil {
 			c.onError(r)
@@ -36,11 +36,13 @@ func NewPeer(ip net.IP) *Peer {
 		kv: make(map[string]interface{}),
 	}
 	mp.BaseService = service.NewBaseService(nil, "MockPeer", mp)
-	mp.Start()
+	if err := mp.Start(); err != nil {
+		panic(err)
+	}
 	return mp
 }
 
-func (mp *Peer) FlushStop() { mp.Stop() }
+func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error
 func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
 func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true }
 func (mp *Peer) NodeInfo() p2p.NodeInfo {
@@ -18,7 +18,7 @@ type mockPeer struct {
 	id ID
 }
 
-func (mp *mockPeer) FlushStop() { mp.Stop() }
+func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
 func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
 func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
 func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} }
@@ -110,7 +110,7 @@ type addrBook struct {
 
 func newHashKey() []byte {
 	result := make([]byte, highwayhash.Size)
-	crand.Read(result)
+	crand.Read(result) //nolint:errcheck // ignore error
 	return result
 }
@@ -328,7 +328,9 @@ func (a *addrBook) MarkGood(id p2p.ID) {
 	}
 	ka.markGood()
 	if ka.isNew() {
-		a.moveToOld(ka)
+		if err := a.moveToOld(ka); err != nil {
+			a.Logger.Error("Error moving address to old", "err", err)
+		}
 	}
 }
@@ -373,7 +375,9 @@ func (a *addrBook) ReinstateBadPeers() {
 			continue
 		}
 
-		a.addToNewBucket(ka, bucket)
+		if err := a.addToNewBucket(ka, bucket); err != nil {
+			a.Logger.Error("Error adding peer to new bucket", "err", err)
+		}
 		delete(a.badPeers, ka.ID())
 
 		a.Logger.Info("Reinstated address", "addr", ka.Addr)
@@ -779,7 +783,9 @@ func (a *addrBook) moveToOld(ka *knownAddress) error {
 		if err != nil {
 			return err
 		}
-		a.addToNewBucket(oldest, newBucketIdx)
+		if err := a.addToNewBucket(oldest, newBucketIdx); err != nil {
+			a.Logger.Error("Error adding peer to old bucket", "err", err)
+		}
 
 		// Finally, add our ka to old bucket again.
 		added = a.addToOldBucket(ka, oldBucketIdx)
@@ -935,6 +941,6 @@ func (a *addrBook) hash(b []byte) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	hasher.Write(b)
+	hasher.Write(b) //nolint:errcheck // ignore error
 	return hasher.Sum(nil), nil
 }
@@ -171,7 +171,9 @@ func (r *Reactor) OnStart() error {
 
 // OnStop implements BaseService
 func (r *Reactor) OnStop() {
-	r.book.Stop()
+	if err := r.book.Stop(); err != nil {
+		r.Logger.Error("Error stopping address book", "err", err)
+	}
 }
 
 // GetChannels implements Reactor
@@ -29,7 +29,7 @@ func (ni mockNodeInfo) Validate() error { return nil }
 func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil }
 
 func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) {
-	sw.peers.Add(peer)
+	sw.peers.Add(peer) //nolint:errcheck // ignore error
 }
 
 func CreateRandomPeer(outbound bool) Peer {
@@ -35,7 +35,11 @@ func TestHeaderEvents(t *testing.T) {
 		// if so, then we start it, listen, and stop it.
 		err := c.Start()
 		require.Nil(t, err, "%d: %+v", i, err)
-		defer c.Stop()
+		t.Cleanup(func() {
+			if err := c.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 
 	evtTyp := types.EventNewBlockHeader
@@ -59,14 +63,22 @@ func TestBlockEvents(t *testing.T) {
 		// if so, then we start it, listen, and stop it.
 		err := c.Start()
 		require.Nil(t, err)
-		defer c.Stop()
+		t.Cleanup(func() {
+			if err := c.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 
 	const subscriber = "TestBlockEvents"
 
 	eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlock).String())
 	require.NoError(t, err)
-	defer c.UnsubscribeAll(context.Background(), subscriber)
+	t.Cleanup(func() {
+		if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil {
+			t.Error(err)
+		}
+	})
 
 	var firstBlockHeight int64
 	for i := int64(0); i < 3; i++ {
@@ -99,7 +111,11 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) {
 		// if so, then we start it, listen, and stop it.
 		err := c.Start()
 		require.Nil(t, err)
-		defer c.Stop()
+		t.Cleanup(func() {
+			if err := c.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 
 	// make the tx
@@ -68,7 +68,11 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
 		return nil, fmt.Errorf("failed to subscribe: %w", err)
 	}
 	// make sure to unregister after the test is over
-	defer c.UnsubscribeAll(ctx, subscriber)
+	defer func() {
+		if deferErr := c.UnsubscribeAll(ctx, subscriber); deferErr != nil {
+			panic(err)
+		}
+	}()
 
 	select {
 	case event := <-eventCh:
@@ -50,7 +50,9 @@ type Local struct {
 // don't run in parallel, or try to simulate an entire network in
 // one process...
 func New(node *nm.Node) *Local {
-	node.ConfigureRPC()
+	if err := node.ConfigureRPC(); err != nil {
+		node.Logger.Error("Error configuring RPC", "err", err)
+	}
 	return &Local{
 		EventBus: node.EventBus(),
 		Logger:   log.NewNopLogger(),
@@ -71,7 +71,11 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
 		env.Logger.Error("Error on broadcast_tx_commit", "err", err)
 		return nil, err
 	}
-	defer env.EventBus.Unsubscribe(context.Background(), subscriber, q)
+	defer func() {
+		if err := env.EventBus.Unsubscribe(context.Background(), subscriber, q); err != nil {
+			env.Logger.Error("Error unsubscribing from eventBus", "err", err)
+		}
+	}()
 
 	// Broadcast tx and wait for CheckTx result
 	checkTxResCh := make(chan *abci.Response, 1)
@@ -125,7 +125,11 @@ func setup() {
 	if err != nil {
 		panic(err)
 	}
-	go server.Serve(listener1, mux, tcpLogger, config)
+	go func() {
+		if err := server.Serve(listener1, mux, tcpLogger, config); err != nil {
+			panic(err)
+		}
+	}()
 
 	unixLogger := logger.With("socket", "unix")
 	mux2 := http.NewServeMux()
@@ -137,7 +141,11 @@ func setup() {
 	if err != nil {
 		panic(err)
 	}
-	go server.Serve(listener2, mux2, unixLogger, config)
+	go func() {
+		if err := server.Serve(listener2, mux2, unixLogger, config); err != nil {
+			panic(err)
+		}
+	}()
 
 	// wait for servers to start
 	time.Sleep(time.Second * 2)
@@ -85,7 +85,11 @@ func TestServeTLS(t *testing.T) {
 		fmt.Fprint(w, "some body")
 	})
 
-	go ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig())
+	go func() {
+		if err := ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()); err != nil {
+			t.Log(err)
+		}
+	}()
 
 	tr := &http.Transport{
 		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
@@ -306,7 +306,9 @@ func (wsc *wsConnection) readRoutine() {
 				err = fmt.Errorf("WSJSONRPC: %v", r)
 			}
 			wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack()))
-			wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err))
+			if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil {
+				wsc.Logger.Error("Error writing RPC response", "err", err)
+			}
 			go wsc.readRoutine()
 		}
 	}()
@@ -332,7 +334,9 @@ func (wsc *wsConnection) readRoutine() {
 			} else {
 				wsc.Logger.Error("Failed to read request", "err", err)
 			}
-			wsc.Stop()
+			if err := wsc.Stop(); err != nil {
+				wsc.Logger.Error("Error closing websocket connection", "err", err)
+			}
 			close(wsc.readRoutineQuit)
 			return
 		}
@@ -341,7 +345,10 @@ func (wsc *wsConnection) readRoutine() {
 		var request types.RPCRequest
 		err = dec.Decode(&request)
 		if err != nil {
-			wsc.WriteRPCResponse(writeCtx, types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)))
+			if err := wsc.WriteRPCResponse(writeCtx,
+				types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil {
+				wsc.Logger.Error("Error writing RPC response", "err", err)
+			}
 			continue
 		}
@@ -358,7 +365,9 @@ func (wsc *wsConnection) readRoutine() {
 		// Now, fetch the RPCFunc and execute it.
 		rpcFunc := wsc.funcMap[request.Method]
 		if rpcFunc == nil {
-			wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID))
+			if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil {
+				wsc.Logger.Error("Error writing RPC response", "err", err)
+			}
 			continue
 		}
@@ -367,9 +376,11 @@ func (wsc *wsConnection) readRoutine() {
 		if len(request.Params) > 0 {
 			fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params)
 			if err != nil {
-				wsc.WriteRPCResponse(writeCtx,
+				if err := wsc.WriteRPCResponse(writeCtx,
 					types.RPCInternalError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)),
-				)
+				); err != nil {
+					wsc.Logger.Error("Error writing RPC response", "err", err)
+				}
 				continue
 			}
 			args = append(args, fnArgs...)
@@ -382,11 +393,15 @@ func (wsc *wsConnection) readRoutine() {
 
 		result, err := unreflectResult(returns)
 		if err != nil {
-			wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(request.ID, err))
+			if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(request.ID, err)); err != nil {
+				wsc.Logger.Error("Error writing RPC response", "err", err)
+			}
 			continue
 		}
 
-		wsc.WriteRPCResponse(writeCtx, types.NewRPCSuccessResponse(request.ID, result))
+		if err := wsc.WriteRPCResponse(writeCtx, types.NewRPCSuccessResponse(request.ID, result)); err != nil {
+			wsc.Logger.Error("Error writing RPC response", "err", err)
+		}
 		}
 	}
 }
@@ -440,12 +455,12 @@ func (wsc *wsConnection) writeRoutine() {
 				wsc.Logger.Error("Can't get NextWriter", "err", err)
 				return
 			}
-			w.Write(jsonBytes)
+			w.Write(jsonBytes) //nolint:errcheck //ignore error
 
 			// Add queued messages to the current websocket message.
 			n := len(wsc.writeChan)
 			for i := 0; i < n; i++ {
-				w.Write(newline)
+				w.Write(newline) //nolint:errcheck //ignore error
 
 				msg = <-wsc.writeChan
 				jsonBytes, err = json.MarshalIndent(msg, "", " ")
@@ -453,7 +468,7 @@ func (wsc *wsConnection) writeRoutine() {
 					wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err)
 					continue
 				}
-				w.Write(jsonBytes)
+				w.Write(jsonBytes) //nolint:errcheck //ignore error
 			}
 
 			if err := w.Close(); err != nil {
@@ -473,39 +473,49 @@ func fireEvents(
 	abciResponses *tmstate.ABCIResponses,
 	validatorUpdates []*types.Validator,
 ) {
-	eventBus.PublishEventNewBlock(types.EventDataNewBlock{
+	if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{
 		Block:            block,
 		ResultBeginBlock: *abciResponses.BeginBlock,
 		ResultEndBlock:   *abciResponses.EndBlock,
-	})
-	eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
+	}); err != nil {
+		logger.Error("Error publishing new block", "err", err)
+	}
+	if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
 		Header:           block.Header,
 		NumTxs:           int64(len(block.Txs)),
 		ResultBeginBlock: *abciResponses.BeginBlock,
 		ResultEndBlock:   *abciResponses.EndBlock,
-	})
+	}); err != nil {
+		logger.Error("Error publishing new block header", "err", err)
+	}
 
 	if len(block.Evidence.Evidence) != 0 {
 		for _, ev := range block.Evidence.Evidence {
-			eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{
+			if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{
 				Evidence: ev,
 				Height:   block.Height,
-			})
+			}); err != nil {
+				logger.Error("Error publishing new evidence", "err", err)
+			}
 		}
 	}

 	for i, tx := range block.Data.Txs {
-		eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{
+		if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{
 			Height: block.Height,
 			Index:  uint32(i),
 			Tx:     tx,
 			Result: *(abciResponses.DeliverTxs[i]),
-		}})
+		}}); err != nil {
+			logger.Error("Error publishing event TX", "err", err)
+		}
 	}

 	if len(validatorUpdates) > 0 {
-		eventBus.PublishEventValidatorSetUpdates(
-			types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates})
+		if err := eventBus.PublishEventValidatorSetUpdates(
+			types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil {
+			logger.Error("Error publishing event", "err", err)
+		}
 	}
 }
@@ -30,9 +30,9 @@ type snapshot struct {
 func (s *snapshot) Key() snapshotKey {
 	// Hash.Write() never returns an error.
 	hasher := sha256.New()
-	hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks)))
-	hasher.Write(s.Hash)
-	hasher.Write(s.Metadata)
+	hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks))) //nolint:errcheck // ignore error
+	hasher.Write(s.Hash)     //nolint:errcheck // ignore error
+	hasher.Write(s.Metadata) //nolint:errcheck // ignore error
 	var key snapshotKey
 	copy(key[:], hasher.Sum(nil))
 	return key
@@ -144,7 +144,10 @@ func extractKey(tmhome, outputPath string) {
 }
 
 func main() {
-	rootCmd.Parse(os.Args[1:])
+	if err := rootCmd.Parse(os.Args[1:]); err != nil {
+		fmt.Printf("Error parsing flags: %v\n", err)
+		os.Exit(1)
+	}
 	if rootCmd.NArg() == 0 || (rootCmd.NArg() == 1 && rootCmd.Arg(0) == "help") {
 		rootCmd.Usage()
 		os.Exit(0)
@@ -166,10 +169,16 @@ func main() {
 			os.Exit(1)
 		}
 	case "run":
-		runCmd.Parse(os.Args[2:])
+		if err := runCmd.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("Error parsing flags: %v\n", err)
+			os.Exit(1)
+		}
 		runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome)
 	case "extract_key":
-		extractKeyCmd.Parse(os.Args[2:])
+		if err := extractKeyCmd.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("Error parsing flags: %v\n", err)
+			os.Exit(1)
+		}
 		extractKey(flagTMHome, flagKeyOutputPath)
 	case "version":
 		fmt.Println(version.Version)