mirror of https://github.com/tendermint/tendermint.git
service: change stop interface (#7816)
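This commit removes the error return from the service Stop method and updates every call site accordingly: code that previously checked Stop's error (usually only to filter out service.ErrAlreadyStopped before logging) now simply calls Stop(). A minimal sketch of the before/after calling pattern, in Go; the legacyService/quietService types and the errAlreadyStopped sentinel are illustrative stand-ins, not the real tendermint libs/service API.

package main

import (
    "errors"
    "log"
)

// Old style: Stop returns an error, and each caller filters out
// "already stopped" before logging anything else.
type legacyService struct{ running bool }

var errAlreadyStopped = errors.New("already stopped")

func (s *legacyService) Stop() error {
    if !s.running {
        return errAlreadyStopped
    }
    s.running = false
    return nil
}

// New style: Stop returns nothing and stopping twice is a harmless no-op,
// so call sites collapse to a single line.
type quietService struct{ running bool }

func (s *quietService) Stop() { s.running = false }

func main() {
    old := &legacyService{running: true}
    if err := old.Stop(); err != nil && !errors.Is(err, errAlreadyStopped) {
        log.Printf("failed to stop service: %v", err) // old-style call site
    }

    svc := &quietService{running: true}
    svc.Stop() // new-style call site
}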
@@ -235,9 +235,7 @@ func (pool *BlockPool) PopRequest() {
     defer pool.mtx.Unlock()

     if r := pool.requesters[pool.height]; r != nil {
-        if err := r.Stop(); err != nil {
-            pool.logger.Error("error stopping requester", "err", err)
-        }
+        r.Stop()
         delete(pool.requesters, pool.height)
         pool.height++
         pool.lastAdvance = time.Now()
@@ -676,9 +674,7 @@ OUTER_LOOP:
         case <-ctx.Done():
             return
         case <-bpr.pool.exitedCh:
-            if err := bpr.Stop(); err != nil {
-                bpr.logger.Error("error stopped requester", "err", err)
-            }
+            bpr.Stop()
             return
         case peerID := <-bpr.redoCh:
             if peerID == bpr.peerID {
@@ -187,9 +187,7 @@ func (r *Reactor) OnStart(ctx context.Context) error {
 // blocking until they all exit.
 func (r *Reactor) OnStop() {
     if r.blockSync.IsSet() {
-        if err := r.pool.Stop(); err != nil {
-            r.logger.Error("failed to stop pool", "err", err)
-        }
+        r.pool.Stop()
     }

     // wait for the poolRoutine and requestRoutine goroutines to gracefully exit
@@ -485,9 +483,7 @@ FOR_LOOP:
                 continue
             }

-            if err := r.pool.Stop(); err != nil {
-                r.logger.Error("failed to stop pool", "err", err)
-            }
+            r.pool.Stop()

             r.blockSync.UnSet()

@@ -915,15 +915,9 @@ type mockTicker struct {
     fired bool
 }

-func (m *mockTicker) Start(context.Context) error {
-    return nil
-}
-
-func (m *mockTicker) Stop() error {
-    return nil
-}
-
-func (m *mockTicker) IsRunning() bool { return false }
+func (m *mockTicker) Start(context.Context) error { return nil }
+func (m *mockTicker) Stop() {}
+func (m *mockTicker) IsRunning() bool { return false }

 func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
     m.mtx.Lock()
@@ -941,8 +935,6 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
     return m.c
 }

-func (*mockTicker) SetLogger(log.Logger) {}
-
 func newPersistentKVStore(t *testing.T, logger log.Logger) abci.Application {
     t.Helper()

@@ -219,11 +219,7 @@ func (r *Reactor) OnStart(ctx context.Context) error {
 func (r *Reactor) OnStop() {
     r.unsubscribeFromBroadcastEvents()

-    if err := r.state.Stop(); err != nil {
-        if !errors.Is(err, service.ErrAlreadyStopped) {
-            r.logger.Error("failed to stop consensus state", "err", err)
-        }
-    }
+    r.state.Stop()

     if !r.WaitSync() {
         r.state.Wait()
@@ -142,9 +142,7 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl

 // go back count steps by resetting the state and running (pb.count - count) steps
 func (pb *playback) replayReset(ctx context.Context, count int, newStepSub eventbus.Subscription) error {
-    if err := pb.cs.Stop(); err != nil {
-        return err
-    }
+    pb.cs.Stop()
     pb.cs.Wait()

     newCS := NewState(ctx, pb.cs.logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
@@ -79,9 +79,7 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe

     require.NoError(t, cs.Start(ctx))
     defer func() {
-        if err := cs.Stop(); err != nil {
-            t.Error(err)
-        }
+        cs.Stop()
     }()
     t.Cleanup(cs.Wait)
     // This is just a signal that we haven't halted; its not something contained
@@ -208,7 +206,7 @@ LOOP:
         startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore)

         // stop consensus state and transactions sender (initFn)
-        cs.Stop() //nolint:errcheck // Logging this error causes failure
+        cs.Stop()
         cancel()

         // if we reached the required height, exit
@@ -292,7 +290,7 @@ func (w *crashingWAL) SearchForEndHeight(
 }

 func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) }
-func (w *crashingWAL) Stop() error { return w.next.Stop() }
+func (w *crashingWAL) Stop() { w.next.Stop() }
 func (w *crashingWAL) Wait() { w.next.Wait() }

 //------------------------------------------------------------------------------------------
@@ -396,10 +396,7 @@ func (cs *State) OnStart(ctx context.Context) error {
         cs.logger.Error("the WAL file is corrupted; attempting repair", "err", err)

         // 1) prep work
-        if err := cs.wal.Stop(); err != nil {
-
-            return err
-        }
+        cs.wal.Stop()

         repairAttempted = true

@@ -494,19 +491,11 @@ func (cs *State) OnStop() {
     close(cs.onStopCh)

     if cs.evsw.IsRunning() {
-        if err := cs.evsw.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                cs.logger.Error("failed trying to stop eventSwitch", "error", err)
-            }
-        }
+        cs.evsw.Stop()
     }

     if cs.timeoutTicker.IsRunning() {
-        if err := cs.timeoutTicker.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                cs.logger.Error("failed trying to stop timeoutTicket", "error", err)
-            }
-        }
+        cs.timeoutTicker.Stop()
     }
     // WAL is stopped in receiveRoutine.
 }
@@ -515,6 +504,7 @@ func (cs *State) OnStop() {
 // NOTE: be sure to Stop() the event switch and drain
 // any event channels or this may deadlock
 func (cs *State) Wait() {
+    cs.evsw.Wait()
     <-cs.done
 }

@@ -840,12 +830,7 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
         // priv_val tracks LastSig

         // close wal now that we're done writing to it
-        if err := cs.wal.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                cs.logger.Error("failed trying to stop WAL", "error", err)
-            }
-        }
-
+        cs.wal.Stop()
         cs.wal.Wait()
         close(cs.done)
     }
@@ -17,7 +17,7 @@ var (
 // The timeoutInfo.Duration may be non-positive.
 type TimeoutTicker interface {
     Start(context.Context) error
-    Stop() error
+    Stop()
     IsRunning() bool
     Chan() <-chan timeoutInfo       // on which to receive a timeout
     ScheduleTimeout(ti timeoutInfo) // reset the timer
@@ -67,7 +67,7 @@ type WAL interface {

     // service methods
     Start(context.Context) error
-    Stop() error
+    Stop()
     Wait()
 }

@@ -164,15 +164,9 @@ func (wal *BaseWAL) FlushAndSync() error {
 func (wal *BaseWAL) OnStop() {
     wal.flushTicker.Stop()
     if err := wal.FlushAndSync(); err != nil {
-        if !errors.Is(err, service.ErrAlreadyStopped) {
-            wal.logger.Error("error on flush data to disk", "error", err)
-        }
-    }
-    if err := wal.group.Stop(); err != nil {
-        if !errors.Is(err, service.ErrAlreadyStopped) {
-            wal.logger.Error("error trying to stop wal", "error", err)
-        }
+        wal.logger.Error("error on flush data to disk", "error", err)
     }
+    wal.group.Stop()
     wal.group.Close()
 }

@@ -438,5 +432,5 @@ func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io
     return nil, false, nil
 }
 func (nilWAL) Start(context.Context) error { return nil }
-func (nilWAL) Stop() error { return nil }
+func (nilWAL) Stop() {}
 func (nilWAL) Wait() {}
@@ -102,14 +102,10 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr

     select {
     case <-numBlocksWritten:
-        if err := consensusState.Stop(); err != nil {
-            t.Error(err)
-        }
+        consensusState.Stop()
         return nil
     case <-time.After(1 * time.Minute):
-        if err := consensusState.Stop(); err != nil {
-            t.Error(err)
-        }
+        consensusState.Stop()
         return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
     }
 }
@@ -219,5 +215,5 @@ func (w *byteBufferWAL) SearchForEndHeight(
 }

 func (w *byteBufferWAL) Start(context.Context) error { return nil }
-func (w *byteBufferWAL) Stop() error { return nil }
+func (w *byteBufferWAL) Stop() {}
 func (w *byteBufferWAL) Wait() {}
@@ -3,7 +3,6 @@ package consensus
 import (
     "bytes"
     "context"
-    "errors"
     "path/filepath"

     "testing"
@@ -17,7 +16,6 @@ import (
     "github.com/tendermint/tendermint/internal/consensus/types"
     "github.com/tendermint/tendermint/internal/libs/autofile"
     "github.com/tendermint/tendermint/libs/log"
-    "github.com/tendermint/tendermint/libs/service"
     tmtime "github.com/tendermint/tendermint/libs/time"
     tmtypes "github.com/tendermint/tendermint/types"
 )
@@ -191,11 +189,7 @@ func TestWALPeriodicSync(t *testing.T) {

     require.NoError(t, wal.Start(ctx))
     t.Cleanup(func() {
-        if err := wal.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                t.Error(err)
-            }
-        }
+        wal.Stop()
         wal.Wait()
     })

@@ -110,7 +110,8 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint
     t.Cleanup(func() {
         for _, r := range rts.reactors {
             if r.IsRunning() {
-                require.NoError(t, r.Stop())
+                r.Stop()
+                r.Wait()
                 require.False(t, r.IsRunning())
             }
         }
@@ -96,7 +96,7 @@ func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint)
     t.Cleanup(func() {
         for nodeID := range rts.reactors {
             if rts.reactors[nodeID].IsRunning() {
-                require.NoError(t, rts.reactors[nodeID].Stop())
+                rts.reactors[nodeID].Stop()
                 rts.reactors[nodeID].Wait()
                 require.False(t, rts.reactors[nodeID].IsRunning())
             }
@@ -184,8 +184,7 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
         }()
     }

-    err := primaryReactor.Stop()
-    require.NoError(t, err)
+    primaryReactor.Stop()
     wg.Wait()
 }

@@ -294,9 +294,7 @@ func (c *MConnection) _recover(ctx context.Context) {
 }

 func (c *MConnection) stopForError(ctx context.Context, r interface{}) {
-    if err := c.Stop(); err != nil {
-        c.logger.Error("error stopping connection", "err", err)
-    }
+    c.Stop()

     if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
         if c.onError != nil {
@@ -208,7 +208,8 @@ func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) {
     require.NoError(t, node.Transport.Close())
     node.cancel()
     if node.Router.IsRunning() {
-        require.NoError(t, node.Router.Stop())
+        node.Router.Stop()
+        node.Router.Wait()
     }

     for _, sub := range subs {
@@ -275,7 +276,8 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions)

     t.Cleanup(func() {
         if router.IsRunning() {
-            require.NoError(t, router.Stop())
+            router.Stop()
+            router.Wait()
         }
         require.NoError(t, transport.Close())
         cancel()
@@ -438,7 +438,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
             }
         }

-        require.NoError(t, router.Stop())
+        router.Stop()
         mockTransport.AssertExpectations(t)
         mockConnection.AssertExpectations(t)
     })
@@ -478,7 +478,7 @@ func TestRouter_AcceptPeers_Error(t *testing.T) {

     require.NoError(t, router.Start(ctx))
     time.Sleep(time.Second)
-    require.NoError(t, router.Stop())
+    router.Stop()

     mockTransport.AssertExpectations(t)
 }
@@ -516,7 +516,7 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {

     require.NoError(t, router.Start(ctx))
     time.Sleep(time.Second)
-    require.NoError(t, router.Stop())
+    router.Stop()

     mockTransport.AssertExpectations(t)
 }
@@ -573,7 +573,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
     close(closeCh)
     time.Sleep(100 * time.Millisecond)

-    require.NoError(t, router.Stop())
+    router.Stop()
     mockTransport.AssertExpectations(t)
     mockConnection.AssertExpectations(t)
 }
@@ -687,7 +687,7 @@ func TestRouter_DialPeers(t *testing.T) {
             }
         }

-        require.NoError(t, router.Stop())
+        router.Stop()
         mockTransport.AssertExpectations(t)
         mockConnection.AssertExpectations(t)
     })
@@ -778,7 +778,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
     close(closeCh)
     time.Sleep(500 * time.Millisecond)

-    require.NoError(t, router.Stop())
+    router.Stop()
     mockTransport.AssertExpectations(t)
     mockConnection.AssertExpectations(t)
 }
@@ -845,7 +845,7 @@ func TestRouter_EvictPeers(t *testing.T) {
         Status: p2p.PeerStatusDown,
     })

-    require.NoError(t, router.Stop())
+    router.Stop()
     mockTransport.AssertExpectations(t)
     mockConnection.AssertExpectations(t)
 }
@@ -895,7 +895,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
     require.NoError(t, err)
     require.NoError(t, router.Start(ctx))
     time.Sleep(1 * time.Second)
-    require.NoError(t, router.Stop())
+    router.Stop()
     require.Empty(t, peerManager.Peers())

     mockConnection.AssertExpectations(t)
@@ -964,6 +964,6 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
         Message: &p2ptest.Message{Value: "Hi"},
     }))

-    require.NoError(t, router.Stop())
+    router.Stop()
     mockTransport.AssertExpectations(t)
 }
@@ -478,12 +478,13 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint {
 func (c *mConnConnection) Close() error {
     var err error
     c.closeOnce.Do(func() {
+        defer close(c.doneCh)
+
         if c.mconn != nil && c.mconn.IsRunning() {
-            err = c.mconn.Stop()
+            c.mconn.Stop()
         } else {
             err = c.conn.Close()
         }
-        close(c.doneCh)
     })
     return err
 }
@@ -2,7 +2,6 @@ package proxy

 import (
     "context"
-    "errors"
     "fmt"
     "os"
     "syscall"
@@ -67,7 +66,7 @@ type multiAppConn struct {
 // of reasonable lifecycle witout needing an explicit stop method.
 type stoppableClient interface {
     abciclient.Client
-    Stop() error
+    Stop()
 }

 // NewMultiAppConn makes all necessary abci connections to the application.
@@ -81,53 +80,42 @@ func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metric
     return multiAppConn
 }

-func (app *multiAppConn) Mempool() AppConnMempool {
-    return app.mempoolConn
-}
-
-func (app *multiAppConn) Consensus() AppConnConsensus {
-    return app.consensusConn
-}
-
-func (app *multiAppConn) Query() AppConnQuery {
-    return app.queryConn
-}
-
-func (app *multiAppConn) Snapshot() AppConnSnapshot {
-    return app.snapshotConn
-}
+func (app *multiAppConn) Mempool() AppConnMempool     { return app.mempoolConn }
+func (app *multiAppConn) Consensus() AppConnConsensus { return app.consensusConn }
+func (app *multiAppConn) Query() AppConnQuery         { return app.queryConn }
+func (app *multiAppConn) Snapshot() AppConnSnapshot   { return app.snapshotConn }

 func (app *multiAppConn) OnStart(ctx context.Context) error {
-    c, err := app.abciClientFor(ctx, connQuery)
-    if err != nil {
-        return err
-    }
-    app.queryConnClient = c.(stoppableClient)
-    app.queryConn = NewAppConnQuery(c, app.metrics)
+    var err error
+    defer func() {
+        if err != nil {
+            app.stopAllClients()
+        }
+    }()

-    c, err = app.abciClientFor(ctx, connSnapshot)
+    app.queryConnClient, err = app.abciClientFor(ctx, connQuery)
     if err != nil {
-        app.stopAllClients()
         return err
     }
-    app.snapshotConnClient = c.(stoppableClient)
-    app.snapshotConn = NewAppConnSnapshot(c, app.metrics)
+    app.queryConn = NewAppConnQuery(app.queryConnClient, app.metrics)

-    c, err = app.abciClientFor(ctx, connMempool)
+    app.snapshotConnClient, err = app.abciClientFor(ctx, connSnapshot)
     if err != nil {
-        app.stopAllClients()
         return err
     }
-    app.mempoolConnClient = c.(stoppableClient)
-    app.mempoolConn = NewAppConnMempool(c, app.metrics)
+    app.snapshotConn = NewAppConnSnapshot(app.snapshotConnClient, app.metrics)

-    c, err = app.abciClientFor(ctx, connConsensus)
+    app.mempoolConnClient, err = app.abciClientFor(ctx, connMempool)
     if err != nil {
-        app.stopAllClients()
         return err
     }
-    app.consensusConnClient = c.(stoppableClient)
-    app.consensusConn = NewAppConnConsensus(c, app.metrics)
+    app.mempoolConn = NewAppConnMempool(app.mempoolConnClient, app.metrics)

+    app.consensusConnClient, err = app.abciClientFor(ctx, connConsensus)
+    if err != nil {
+        return err
+    }
+    app.consensusConn = NewAppConnConsensus(app.consensusConnClient, app.metrics)
+
     // Kill Tendermint if the ABCI application crashes.
     app.startWatchersForClientErrorToKillTendermint(ctx)
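The rewritten OnStart above funnels all four ABCI client setups through a single outer err variable and a deferred cleanup, so whichever step fails, the clients that were already started get stopped. A small self-contained Go sketch of that defer-on-error idiom follows; the resource/openAll names are illustrative and not part of the tendermint proxy package.

package main

import (
    "errors"
    "fmt"
)

type resource struct{ name string }

func (r *resource) Stop() { fmt.Println("stopped", r.name) }

func open(name string, fail bool) (*resource, error) {
    if fail {
        return nil, errors.New("cannot open " + name)
    }
    return &resource{name: name}, nil
}

// openAll opens several resources in sequence. The deferred closure reads the
// outer err at return time and stops everything opened so far on failure,
// mirroring the stopAllClients defer in the OnStart rewrite above.
func openAll() ([]*resource, error) {
    var opened []*resource
    var err error
    defer func() {
        if err != nil {
            for _, r := range opened {
                r.Stop()
            }
        }
    }()

    for i, fail := range []bool{false, false, true} {
        var r *resource
        r, err = open(fmt.Sprintf("conn-%d", i), fail)
        if err != nil {
            return nil, err
        }
        opened = append(opened, r)
    }
    return opened, nil
}

func main() {
    if _, err := openAll(); err != nil {
        fmt.Println("error:", err) // conn-0 and conn-1 were stopped before this prints
    }
}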
@@ -135,9 +123,7 @@ func (app *multiAppConn) OnStart(ctx context.Context) error {
     return nil
 }

-func (app *multiAppConn) OnStop() {
-    app.stopAllClients()
-}
+func (app *multiAppConn) OnStop() { app.stopAllClients() }

 func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context.Context) {
     // this function starts a number of threads (per abci client)
@@ -154,12 +140,10 @@ func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context
         }
     }

-    type op struct {
+    for _, client := range []struct {
         connClient stoppableClient
         name       string
-    }
-
-    for _, client := range []op{
+    }{
         {
             connClient: app.consensusConnClient,
             name:       connConsensus,
@@ -190,47 +174,36 @@ func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context
 }

 func (app *multiAppConn) stopAllClients() {
-    if app.consensusConnClient != nil {
-        if err := app.consensusConnClient.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                app.logger.Error("error while stopping consensus client", "error", err)
-            }
-        }
-    }
-    if app.mempoolConnClient != nil {
-        if err := app.mempoolConnClient.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                app.logger.Error("error while stopping mempool client", "error", err)
-            }
-        }
-    }
-    if app.queryConnClient != nil {
-        if err := app.queryConnClient.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                app.logger.Error("error while stopping query client", "error", err)
-            }
-        }
-    }
-    if app.snapshotConnClient != nil {
-        if err := app.snapshotConnClient.Stop(); err != nil {
-            if !errors.Is(err, service.ErrAlreadyStopped) {
-                app.logger.Error("error while stopping snapshot client", "error", err)
-            }
+    for _, client := range []stoppableClient{
+        app.consensusConnClient,
+        app.mempoolConnClient,
+        app.queryConnClient,
+        app.snapshotConnClient,
+    } {
+        if client != nil {
+            client.Stop()
         }
     }
 }

-func (app *multiAppConn) abciClientFor(ctx context.Context, conn string) (abciclient.Client, error) {
+func (app *multiAppConn) abciClientFor(ctx context.Context, conn string) (stoppableClient, error) {
     c, err := app.clientCreator(app.logger.With(
         "module", "abci-client",
         "connection", conn))
     if err != nil {
         return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err)
     }
+
     if err := c.Start(ctx); err != nil {
         return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err)
     }
-    return c, nil
+
+    client, ok := c.(stoppableClient)
+    if !ok {
+        return nil, fmt.Errorf("%T is not a stoppable client", c)
+    }
+
+    return client, nil
 }

 func kill() error {
@@ -23,7 +23,7 @@ type noopStoppableClientImpl struct {
     count int
 }

-func (c *noopStoppableClientImpl) Stop() error { c.count++; return nil }
+func (c *noopStoppableClientImpl) Stop() { c.count++ }

 func TestAppConns_Start_Stop(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
@@ -439,7 +439,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
     eventBus := eventbus.NewDefault(logger)
     err = eventBus.Start(ctx)
     require.NoError(t, err)
-    defer eventBus.Stop() //nolint:errcheck // ignore for tests
+    defer eventBus.Stop()

     blockExec.SetEventBus(eventBus)