p2p: remove final shims from p2p package (#7136)

This is, perhaps, the trivial final piece of #7075 that I've been
working on.

There's more work to be done: 
- push more of the setup into the packages themselves
- move channel-based sending/filtering out of the 
- simplify the buffering throughout the p2p stack.
This commit is contained in:
Sam Kleinman
2021-10-15 16:08:09 -04:00
committed by GitHub
parent 7143f14a63
commit ca8f004112
11 changed files with 158 additions and 180 deletions

View File

@@ -303,11 +303,14 @@ func makeNode(cfg *config.Config,
sm.BlockExecutorWithMetrics(nodeMetrics.state),
)
csReactor, csState := createConsensusReactor(
csReactor, csState, err := createConsensusReactor(
cfg, state, blockExec, blockStore, mp, evPool,
privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus,
peerManager, router, consensusLogger,
)
if err != nil {
return nil, combineCloseError(err, makeCloser(closers))
}
// Create the blockchain reactor. Note, we do not start block sync if we're
// doing a state sync first.
@@ -334,7 +337,17 @@ func makeNode(cfg *config.Config,
// we should clean this whole thing up. See:
// https://github.com/tendermint/tendermint/issues/4644
ssLogger := logger.With("module", "statesync")
channels := makeChannelsFromShims(router, statesync.ChannelShims)
ssChDesc := statesync.GetChannelDescriptors()
channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc))
for idx := range ssChDesc {
chd := ssChDesc[idx]
ch, err := router.OpenChannel(chd)
if err != nil {
return nil, err
}
channels[ch.ID] = ch
}
peerUpdates := peerManager.Subscribe()
stateSyncReactor := statesync.NewReactor(
@@ -1088,23 +1101,3 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt
return opts
}
// FIXME: Temporary helper function, shims should be removed.
// makeChannelsFromShims opens a channel on the router for every
// descriptor in chDescs and returns the opened channels keyed by
// their ChannelID. It panics if OpenChannel fails for any
// descriptor, since callers have no way to proceed without the
// channel.
func makeChannelsFromShims(
router *p2p.Router,
chDescs []*p2p.ChannelDescriptor,
) map[p2p.ChannelID]*p2p.Channel {
channels := map[p2p.ChannelID]*p2p.Channel{}
for idx := range chDescs {
chDesc := chDescs[idx]
ch, err := router.OpenChannel(chDesc)
if err != nil {
// A failed open is unrecoverable at this layer; abort startup.
panic(fmt.Sprintf("failed to open channel %v: %v", chDesc.ID, err))
}
channels[chDesc.ID] = ch
}
return channels
}

View File

@@ -198,13 +198,15 @@ func createMempoolReactor(
) (service.Service, mempool.Mempool, error) {
logger = logger.With("module", "mempool", "version", cfg.Mempool.Version)
channelShims := mempoolv0.GetChannelShims(cfg.Mempool)
channels := makeChannelsFromShims(router, channelShims)
peerUpdates := peerManager.Subscribe()
switch cfg.Mempool.Version {
case config.MempoolV0:
ch, err := router.OpenChannel(mempoolv0.GetChannelDescriptor(cfg.Mempool))
if err != nil {
return nil, nil, err
}
mp := mempoolv0.NewCListMempool(
cfg.Mempool,
proxyApp.Mempool(),
@@ -221,7 +223,7 @@ func createMempoolReactor(
cfg.Mempool,
peerManager,
mp,
channels[mempool.MempoolChannel],
ch,
peerUpdates,
)
@@ -232,6 +234,11 @@ func createMempoolReactor(
return reactor, mp, nil
case config.MempoolV1:
ch, err := router.OpenChannel(mempoolv1.GetChannelDescriptor(cfg.Mempool))
if err != nil {
return nil, nil, err
}
mp := mempoolv1.NewTxMempool(
logger,
cfg.Mempool,
@@ -247,7 +254,7 @@ func createMempoolReactor(
cfg.Mempool,
peerManager,
mp,
channels[mempool.MempoolChannel],
ch,
peerUpdates,
)
@@ -283,9 +290,14 @@ func createEvidenceReactor(
return nil, nil, fmt.Errorf("creating evidence pool: %w", err)
}
ch, err := router.OpenChannel(evidence.GetChannelDescriptor())
if err != nil {
return nil, nil, fmt.Errorf("creating evidence channel: %w", err)
}
evidenceReactor := evidence.NewReactor(
logger,
makeChannelsFromShims(router, evidence.ChannelShims)[evidence.EvidenceChannel],
ch,
peerManager.Subscribe(),
evidencePool,
)
@@ -307,12 +319,16 @@ func createBlockchainReactor(
logger = logger.With("module", "blockchain")
channels := makeChannelsFromShims(router, blocksync.ChannelShims)
ch, err := router.OpenChannel(blocksync.GetChannelDescriptor())
if err != nil {
return nil, err
}
peerUpdates := peerManager.Subscribe()
reactor, err := blocksync.NewReactor(
logger, state.Copy(), blockExec, blockStore, csReactor,
channels[blocksync.BlockSyncChannel], peerUpdates, blockSync,
ch, peerUpdates, blockSync,
metrics,
)
if err != nil {
@@ -336,7 +352,7 @@ func createConsensusReactor(
peerManager *p2p.PeerManager,
router *p2p.Router,
logger log.Logger,
) (*consensus.Reactor, *consensus.State) {
) (*consensus.Reactor, *consensus.State, error) {
consensusState := consensus.NewState(
cfg.Consensus,
@@ -352,13 +368,19 @@ func createConsensusReactor(
consensusState.SetPrivValidator(privValidator)
}
var (
channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates
)
csChDesc := consensus.GetChannelDescriptors()
channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc))
for idx := range csChDesc {
chd := csChDesc[idx]
ch, err := router.OpenChannel(chd)
if err != nil {
return nil, nil, err
}
channels = makeChannelsFromShims(router, consensus.ChannelShims)
peerUpdates = peerManager.Subscribe()
channels[ch.ID] = ch
}
peerUpdates := peerManager.Subscribe()
reactor := consensus.NewReactor(
logger,
@@ -376,7 +398,7 @@ func createConsensusReactor(
// consensusReactor will set it on consensusState and blockExecutor.
reactor.SetEventBus(eventBus)
return reactor, consensusState
return reactor, consensusState, nil
}
func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport {