service: plumb contexts to all (most) threads (#7363)
This continues the push of plumbing contexts through tendermint. I attempted to find all goroutines in the production (non-test) code and to make sure that each of these threads exits when its context is canceled; I believe this PR does that.
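
The change applies one shutdown pattern at several sites below (the pprof server in OnStart and the Prometheus server in startPrometheusServer): a blocking server goroutine is paired with a watcher goroutine that calls Shutdown once the context is canceled, plus a second cancel function so the watcher exits if the server fails first. A minimal self-contained sketch of that pattern — runServer, the one-second timeout, and the address are illustrative, not code from this commit:

    package main

    import (
        "context"
        "log"
        "net/http"
        "time"
    )

    // runServer ties srv's lifetime to ctx. A watcher goroutine shuts the
    // server down when ctx is canceled; srvCancel lets the watcher exit
    // early if the server fails on its own, so neither goroutine leaks.
    func runServer(ctx context.Context, srv *http.Server) {
        srvCtx, srvCancel := context.WithCancel(ctx)

        go func() {
            select {
            case <-ctx.Done():
                // Parent canceled: give in-flight requests one second to finish.
                sctx, scancel := context.WithTimeout(context.Background(), time.Second)
                defer scancel()
                _ = srv.Shutdown(sctx)
            case <-srvCtx.Done():
                // ListenAndServe already returned; nothing left to shut down.
            }
        }()

        // Blocks until Shutdown is called or the listener fails.
        if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            log.Printf("server error: %v", err)
        }
        srvCancel() // plays the role of rpcCancel/promCancel in the diff
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        runServer(ctx, &http.Server{Addr: "localhost:6060", Handler: nil})
    }

Without the second cancel, a server that fails to bind its port would leave the watcher goroutine blocked on ctx.Done() for the remaining life of the process; that is what rpcCancel and promCancel guard against in the diff below.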
node/node.go (72 lines changed)
@@ -92,7 +92,9 @@ func newDefaultNode(
         return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err)
     }
     if cfg.Mode == config.ModeSeed {
-        return makeSeedNode(cfg,
+        return makeSeedNode(
+            ctx,
+            cfg,
             config.DefaultDBProvider,
             nodeKey,
             defaultGenesisDocProviderFunc(cfg),
@@ -280,7 +282,7 @@ func makeNode(
             makeCloser(closers))
     }

-    router, err := createRouter(logger, nodeMetrics.p2p, nodeInfo, nodeKey,
+    router, err := createRouter(ctx, logger, nodeMetrics.p2p, nodeInfo, nodeKey,
         peerManager, cfg, proxyApp)
     if err != nil {
         return nil, combineCloseError(
@@ -288,14 +290,14 @@ func makeNode(
             makeCloser(closers))
     }

-    mpReactor, mp, err := createMempoolReactor(
+    mpReactor, mp, err := createMempoolReactor(ctx,
         cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
     )
     if err != nil {
         return nil, combineCloseError(err, makeCloser(closers))
     }

-    evReactor, evPool, err := createEvidenceReactor(
+    evReactor, evPool, err := createEvidenceReactor(ctx,
         cfg, dbProvider, stateDB, blockStore, peerManager, router, logger,
     )
     if err != nil {
@@ -324,7 +326,7 @@ func makeNode(

     // Create the blockchain reactor. Note, we do not start block sync if we're
     // doing a state sync first.
-    bcReactor, err := createBlockchainReactor(
+    bcReactor, err := createBlockchainReactor(ctx,
         logger, state, blockExec, blockStore, csReactor,
         peerManager, router, blockSync && !stateSync, nodeMetrics.consensus,
     )
@@ -350,7 +352,7 @@ func makeNode(
     channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc))
     for idx := range ssChDesc {
         chd := ssChDesc[idx]
-        ch, err := router.OpenChannel(chd)
+        ch, err := router.OpenChannel(ctx, chd)
         if err != nil {
             return nil, err
         }
@@ -369,7 +371,7 @@ func makeNode(
         channels[statesync.ChunkChannel],
         channels[statesync.LightBlockChannel],
         channels[statesync.ParamsChannel],
-        peerManager.Subscribe(),
+        peerManager.Subscribe(ctx),
         stateStore,
         blockStore,
         cfg.StateSync.TempDir,
@@ -378,7 +380,7 @@ func makeNode(

     var pexReactor service.Service
     if cfg.P2P.PexReactor {
-        pexReactor, err = createPEXReactor(logger, peerManager, router)
+        pexReactor, err = createPEXReactor(ctx, logger, peerManager, router)
         if err != nil {
             return nil, combineCloseError(err, makeCloser(closers))
         }
@@ -441,7 +443,9 @@ func makeNode(
 }

 // makeSeedNode returns a new seed node, containing only p2p, pex reactor
-func makeSeedNode(cfg *config.Config,
+func makeSeedNode(
+    ctx context.Context,
+    cfg *config.Config,
     dbProvider config.DBProvider,
     nodeKey types.NodeKey,
     genesisDocProvider genesisDocProvider,
@@ -476,7 +480,7 @@ func makeSeedNode(cfg *config.Config,
             closer)
     }

-    router, err := createRouter(logger, p2pMetrics, nodeInfo, nodeKey,
+    router, err := createRouter(ctx, logger, p2pMetrics, nodeInfo, nodeKey,
         peerManager, cfg, nil)
     if err != nil {
         return nil, combineCloseError(
@@ -484,7 +488,7 @@ func makeSeedNode(cfg *config.Config,
             closer)
     }

-    pexReactor, err := createPEXReactor(logger, peerManager, router)
+    pexReactor, err := createPEXReactor(ctx, logger, peerManager, router)
     if err != nil {
         return nil, combineCloseError(err, closer)
     }
@@ -510,12 +514,25 @@ func makeSeedNode(cfg *config.Config,
 // OnStart starts the Node. It implements service.Service.
 func (n *nodeImpl) OnStart(ctx context.Context) error {
     if n.config.RPC.PprofListenAddress != "" {
-        // this service is not cleaned up (I believe that we'd
-        // need to have another thread and a potentially a
-        // context to get this functionality.)
+        rpcCtx, rpcCancel := context.WithCancel(ctx)
+        srv := &http.Server{Addr: n.config.RPC.PprofListenAddress, Handler: nil}
+        go func() {
+            select {
+            case <-ctx.Done():
+                sctx, scancel := context.WithTimeout(context.Background(), time.Second)
+                defer scancel()
+                _ = srv.Shutdown(sctx)
+            case <-rpcCtx.Done():
+            }
+        }()
+
         go func() {
             n.Logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress)
-            n.Logger.Error("pprof server error", "err", http.ListenAndServe(n.config.RPC.PprofListenAddress, nil))
+
+            if err := srv.ListenAndServe(); err != nil {
+                n.Logger.Error("pprof server error", "err", err)
+                rpcCancel()
+            }
         }()
     }

@@ -538,7 +555,7 @@ func (n *nodeImpl) OnStart(ctx context.Context) error {

     if n.config.Instrumentation.Prometheus &&
         n.config.Instrumentation.PrometheusListenAddr != "" {
-        n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
+        n.prometheusSrv = n.startPrometheusServer(ctx, n.config.Instrumentation.PrometheusListenAddr)
     }

     // Start the transport.
@@ -784,6 +801,7 @@ func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {
         if n.config.RPC.IsTLSEnabled() {
             go func() {
                 if err := rpcserver.ServeTLS(
+                    ctx,
                     listener,
                     rootHandler,
                     n.config.RPC.CertFile(),
@@ -797,6 +815,7 @@ func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {
         } else {
             go func() {
                 if err := rpcserver.Serve(
+                    ctx,
                     listener,
                     rootHandler,
                     rpcLogger,
@@ -815,7 +834,7 @@ func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {

 // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
 // collectors on addr.
-func (n *nodeImpl) startPrometheusServer(addr string) *http.Server {
+func (n *nodeImpl) startPrometheusServer(ctx context.Context, addr string) *http.Server {
     srv := &http.Server{
         Addr: addr,
         Handler: promhttp.InstrumentMetricHandler(
@@ -825,12 +844,25 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server {
             ),
         ),
     }

+    promCtx, promCancel := context.WithCancel(ctx)
     go func() {
-        if err := srv.ListenAndServe(); err != http.ErrServerClosed {
-            // Error starting or closing listener:
-            n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
+        select {
+        case <-ctx.Done():
+            sctx, scancel := context.WithTimeout(context.Background(), time.Second)
+            defer scancel()
+            _ = srv.Shutdown(sctx)
+        case <-promCtx.Done():
         }
     }()
+
+    go func() {
+        if err := srv.ListenAndServe(); err != nil {
+            n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
+            promCancel()
+        }
+    }()
+
     return srv
 }

node/node_test.go

@@ -528,7 +528,8 @@ func TestNodeNewSeedNode(t *testing.T) {
     nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
     require.NoError(t, err)

-    ns, err := makeSeedNode(cfg,
+    ns, err := makeSeedNode(ctx,
+        cfg,
         config.DefaultDBProvider,
         nodeKey,
         defaultGenesisDocProviderFunc(cfg),

node/public.go

@@ -68,7 +68,7 @@ func New(
             config.DefaultDBProvider,
             logger)
     case config.ModeSeed:
-        return makeSeedNode(conf, config.DefaultDBProvider, nodeKey, genProvider, logger)
+        return makeSeedNode(ctx, conf, config.DefaultDBProvider, nodeKey, genProvider, logger)
     default:
         return nil, fmt.Errorf("%q is not a valid mode", conf.Mode)
     }

node/setup.go

@@ -190,6 +190,7 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
 }

 func createMempoolReactor(
+    ctx context.Context,
     cfg *config.Config,
     proxyApp proxy.AppConns,
     state sm.State,
@@ -201,7 +202,7 @@ func createMempoolReactor(

     logger = logger.With("module", "mempool")

-    ch, err := router.OpenChannel(mempool.GetChannelDescriptor(cfg.Mempool))
+    ch, err := router.OpenChannel(ctx, mempool.GetChannelDescriptor(cfg.Mempool))
     if err != nil {
         return nil, nil, err
     }
@@ -222,7 +223,7 @@ func createMempoolReactor(
         peerManager,
         mp,
         ch,
-        peerManager.Subscribe(),
+        peerManager.Subscribe(ctx),
     )

     if cfg.Consensus.WaitForTxs() {
@@ -233,6 +234,7 @@ func createMempoolReactor(
 }

 func createEvidenceReactor(
+    ctx context.Context,
     cfg *config.Config,
     dbProvider config.DBProvider,
     stateDB dbm.DB,
@@ -253,7 +255,7 @@ func createEvidenceReactor(
         return nil, nil, fmt.Errorf("creating evidence pool: %w", err)
     }

-    ch, err := router.OpenChannel(evidence.GetChannelDescriptor())
+    ch, err := router.OpenChannel(ctx, evidence.GetChannelDescriptor())
     if err != nil {
         return nil, nil, fmt.Errorf("creating evidence channel: %w", err)
     }
@@ -261,7 +263,7 @@ func createEvidenceReactor(
     evidenceReactor := evidence.NewReactor(
         logger,
         ch,
-        peerManager.Subscribe(),
+        peerManager.Subscribe(ctx),
         evidencePool,
     )

@@ -269,6 +271,7 @@ func createEvidenceReactor(
 }

 func createBlockchainReactor(
+    ctx context.Context,
     logger log.Logger,
     state sm.State,
     blockExec *sm.BlockExecutor,
@@ -282,12 +285,12 @@ func createBlockchainReactor(

     logger = logger.With("module", "blockchain")

-    ch, err := router.OpenChannel(blocksync.GetChannelDescriptor())
+    ch, err := router.OpenChannel(ctx, blocksync.GetChannelDescriptor())
     if err != nil {
         return nil, err
     }

-    peerUpdates := peerManager.Subscribe()
+    peerUpdates := peerManager.Subscribe(ctx)

     reactor, err := blocksync.NewReactor(
         logger, state.Copy(), blockExec, blockStore, csReactor,
@@ -338,7 +341,7 @@ func createConsensusReactor(
     channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc))
     for idx := range csChDesc {
         chd := csChDesc[idx]
-        ch, err := router.OpenChannel(chd)
+        ch, err := router.OpenChannel(ctx, chd)
         if err != nil {
             return nil, nil, err
         }
@@ -353,7 +356,7 @@ func createConsensusReactor(
         channels[consensus.DataChannel],
         channels[consensus.VoteChannel],
         channels[consensus.VoteSetBitsChannel],
-        peerManager.Subscribe(),
+        peerManager.Subscribe(ctx),
         waitSync,
         consensus.ReactorMetrics(csMetrics),
     )
@@ -450,6 +453,7 @@ func createPeerManager(
 }

 func createRouter(
+    ctx context.Context,
     logger log.Logger,
     p2pMetrics *p2p.Metrics,
     nodeInfo types.NodeInfo,
@@ -468,6 +472,7 @@ func createRouter(
     }

     return p2p.NewRouter(
+        ctx,
         p2pLogger,
         p2pMetrics,
         nodeInfo,
@@ -480,17 +485,18 @@ func createRouter(
 }

 func createPEXReactor(
+    ctx context.Context,
     logger log.Logger,
     peerManager *p2p.PeerManager,
     router *p2p.Router,
 ) (service.Service, error) {

-    channel, err := router.OpenChannel(pex.ChannelDescriptor())
+    channel, err := router.OpenChannel(ctx, pex.ChannelDescriptor())
     if err != nil {
         return nil, err
     }

-    return pex.NewReactor(logger, peerManager, channel, peerManager.Subscribe()), nil
+    return pex.NewReactor(logger, peerManager, channel, peerManager.Subscribe(ctx)), nil
 }

 func makeNodeInfo(
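
The other recurring change above is peerManager.Subscribe() becoming peerManager.Subscribe(ctx), scoping each peer-update subscription to the caller's context. A hedged sketch of the consumer side of such a subscription — PeerUpdate and the Updates() channel are local stand-ins for tendermint's p2p types, declared here so the example is self-contained:

    package sketch

    import "context"

    // PeerUpdate stands in for the p2p package's peer-update event type.
    type PeerUpdate struct {
        NodeID string
        Status string
    }

    // peerUpdateSource abstracts the one method this loop needs from a
    // subscription like the ones returned by peerManager.Subscribe(ctx).
    type peerUpdateSource interface {
        Updates() <-chan PeerUpdate
    }

    // processPeerUpdates drains a context-scoped subscription until the
    // context is canceled, so a reactor's update loop exits cleanly on
    // node shutdown instead of leaking.
    func processPeerUpdates(ctx context.Context, peerUpdates peerUpdateSource) {
        for {
            select {
            case <-ctx.Done():
                return
            case peerUpdate := <-peerUpdates.Updates():
                // React to the peer status change (connected, removed, ...).
                _ = peerUpdate
            }
        }
    }

The intent of passing ctx into Subscribe is that canceling the context tears down the subscription as well, so loops like this one exit without a separate close step.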