Compare commits

...

4 Commits

Author        SHA1        Message                                           Date
tycho garen   10225aa8a9  merge                                             2021-06-14 12:26:07 -04:00
tycho garen   73f4714053  nil safety                                        2021-06-14 09:48:02 -04:00
tycho garen   3e30543246  hacking                                           2021-06-14 08:18:53 -04:00
tycho garen   f8458a19d4  node: minimize hardcoded service initialization   2021-06-08 17:40:02 -04:00
4 changed files with 213 additions and 173 deletions

libs/service/group.go (new file, +88)

@@ -0,0 +1,88 @@
package service

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/log"
)

// groupImpl composes several Services so they start, stop, and reset
// together, in the order provided.
type groupImpl struct {
	*BaseService
	services []Service
}

func NewGroup(logger log.Logger, name string, services ...Service) Service {
	srv := &groupImpl{
		services: services,
	}
	srv.BaseService = NewBaseService(logger, name, srv)
	return srv
}

func (gs *groupImpl) OnStart() error {
	for _, srv := range gs.services {
		if err := srv.Start(); err != nil {
			return err
		}
	}
	return nil
}

func (gs *groupImpl) OnStop() {
	for idx, srv := range gs.services {
		if err := srv.Stop(); err != nil {
			gs.Logger.Error(
				fmt.Sprintf("problem stopping service %d of %d", idx, len(gs.services)),
				"err", err)
		}
	}
}

func (gs *groupImpl) OnReset() error {
	for _, srv := range gs.services {
		if err := srv.Reset(); err != nil {
			return err
		}
	}
	return nil
}

// FunctionalService adapts plain functions to the Service lifecycle.
// Any nil member is treated as a no-op.
type FunctionalService struct {
	Starter func() error
	Stopper func()
	Reseter func() error
}

func MakeFunctionalService(logger log.Logger, name string, opts FunctionalService) Service {
	srv := &funImpl{
		ops: opts,
	}
	srv.BaseService = NewBaseService(logger, name, srv)
	return srv
}

type funImpl struct {
	*BaseService
	ops FunctionalService
}

func (fs *funImpl) OnStart() error {
	if fs.ops.Starter != nil {
		return fs.ops.Starter()
	}
	return nil
}

func (fs *funImpl) OnStop() {
	if fs.ops.Stopper != nil {
		fs.ops.Stopper()
	}
}

func (fs *funImpl) OnReset() error {
	if fs.ops.Reseter != nil {
		return fs.ops.Reseter()
	}
	return nil
}

node/node.go

@@ -18,7 +18,6 @@ import (
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
 	cs "github.com/tendermint/tendermint/internal/consensus"
-	"github.com/tendermint/tendermint/internal/evidence"
 	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/pex"
@@ -36,7 +35,6 @@ import (
 	grpccore "github.com/tendermint/tendermint/rpc/grpc"
 	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
 	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/state/indexer"
 	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
@@ -72,16 +70,13 @@ type nodeImpl struct {
 	stateSync         bool                    // whether the node should state sync on startup
 	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
 	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
-	consensusState    *cs.State               // latest consensus state
 	stateSyncGenesis  sm.State                // provides the genesis state for state sync
 	consensusReactor  *cs.Reactor             // for participating in the consensus
-	pexReactor        *pex.Reactor            // for exchanging peer addresses
-	pexReactorV2      *pex.ReactorV2          // for exchanging peer addresses
-	evidenceReactor   *evidence.Reactor
-	evidencePool      *evidence.Pool          // tracking evidence
 	proxyApp          proxy.AppConns          // connection to the application
+	pexReactor        service.Service         // for exchanging peer addresses
+	evidenceReactor   service.Service
 	rpcListeners      []net.Listener          // rpc servers
-	eventSinks        []indexer.EventSink
-	indexerService    *indexer.Service
+	indexerService    service.Service
+	rpcEnv            *rpccore.Environment
 	prometheusSrv     *http.Server
 }
@@ -203,7 +198,7 @@ func makeNode(config *cfg.Config,
 	}

 	// Determine whether we should attempt state sync.
-	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
+	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(genDoc, pubKey)
 	if stateSync && state.LastBlockHeight > 0 {
 		logger.Info("Found local state with non-zero height, skipping state sync")
 		stateSync = false
@@ -371,10 +366,9 @@ func makeNode(config *cfg.Config,
 	// Note we currently use the addrBook regardless at least for AddOurAddress
 	var (
-		pexReactor   *pex.Reactor
-		pexReactorV2 *pex.ReactorV2
-		sw           *p2p.Switch
-		addrBook     pex.AddrBook
+		pexReactor service.Service
+		sw         *p2p.Switch
+		addrBook   pex.AddrBook
 	)

 	pexCh := pex.ChannelDescriptor()
@@ -382,7 +376,7 @@ func makeNode(config *cfg.Config,
 	if config.P2P.DisableLegacy {
 		addrBook = nil
-		pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router)
+		pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
 		if err != nil {
 			return nil, err
 		}
@@ -436,19 +430,36 @@ func makeNode(config *cfg.Config,
 		bcReactor:        bcReactor,
 		mempoolReactor:   mpReactor,
 		mempool:          mp,
-		consensusState:   csState,
 		consensusReactor: csReactor,
 		stateSyncReactor: stateSyncReactor,
 		stateSync:        stateSync,
 		pexReactor:       pexReactor,
-		pexReactorV2:     pexReactorV2,
 		evidenceReactor:  evReactor,
-		evidencePool:     evPool,
 		proxyApp:         proxyApp,
 		indexerService:   indexerService,
 		eventBus:         eventBus,
-		eventSinks:       eventSinks,
+		rpcEnv: &rpccore.Environment{
+			ProxyAppQuery:    proxyApp.Query(),
+			ProxyAppMempool:  proxyApp.Mempool(),
+			StateStore:       stateStore,
+			BlockStore:       blockStore,
+			EvidencePool:     evPool,
+			ConsensusState:   csState,
+			P2PPeers:         sw,
+			GenDoc:           genDoc,
+			EventSinks:       eventSinks,
+			ConsensusReactor: csReactor,
+			EventBus:         eventBus,
+			Mempool:          mp,
+			Logger:           logger.With("module", "rpc"),
+			Config:           *config.RPC,
+		},
 	}

+	node.rpcEnv.P2PTransport = node
+
 	node.BaseService = *service.NewBaseService(logger, "Node", node)

 	return node, nil
@@ -481,25 +492,6 @@ func makeSeedNode(config *cfg.Config,
 	p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID)

 	p2pLogger := logger.With("module", "p2p")
 	transport := createTransport(p2pLogger, config)

-	sw := createSwitch(
-		config, transport, p2pMetrics, nil, nil,
-		nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger,
-	)
-
-	err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
-	if err != nil {
-		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
-	}
-
-	err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
-	if err != nil {
-		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
-	}
-
-	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
-	if err != nil {
-		return nil, fmt.Errorf("could not create addrbook: %w", err)
-	}

 	peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID)
 	if err != nil {
@@ -513,8 +505,9 @@
 	}

 	var (
-		pexReactor   *pex.Reactor
-		pexReactorV2 *pex.ReactorV2
+		pexReactor service.Service
+		sw         *p2p.Switch
+		addrBook   pex.AddrBook
 	)

 	// add the pex reactor
@@ -524,11 +517,32 @@
 	pexCh := pex.ChannelDescriptor()
 	transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})

 	if config.P2P.DisableLegacy {
-		pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router)
+		pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
 		if err != nil {
 			return nil, err
 		}
 	} else {
+		sw = createSwitch(
+			config, transport, p2pMetrics, nil, nil,
+			nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger,
+		)
+
+		err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
+		if err != nil {
+			return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
+		}
+
+		err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
+		if err != nil {
+			return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
+		}
+
+		addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
+		if err != nil {
+			return nil, fmt.Errorf("could not create addrbook: %w", err)
+		}
+
 		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
 	}
@@ -551,8 +565,7 @@
 		peerManager: peerManager,
 		router:      router,

-		pexReactor:   pexReactor,
-		pexReactorV2: pexReactorV2,
+		pexReactor: pexReactor,
 	}
 	node.BaseService = *service.NewBaseService(logger, "SeedNode", node)
@@ -642,11 +655,11 @@ func (n *nodeImpl) OnStart() error {
 		}
 	}

-	if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil {
-		if err := n.pexReactorV2.Start(); err != nil {
-			return err
-		}
-	} else {
-		if err := n.pexReactor.Start(); err != nil {
-			return err
-		}
+	if err := n.pexReactor.Start(); err != nil {
+		return err
+	}
+
+	if !n.config.P2P.DisableLegacy {
 		// Always connect to persistent peers
 		err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
 		if err != nil {
@@ -721,10 +734,8 @@ func (n *nodeImpl) OnStop() {
 		}
 	}

-	if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil {
-		if err := n.pexReactorV2.Stop(); err != nil {
-			n.Logger.Error("failed to stop the PEX v2 reactor", "err", err)
-		}
+	if err := n.pexReactor.Stop(); err != nil {
+		n.Logger.Error("failed to stop the PEX reactor", "err", err)
 	}

 	if n.config.P2P.DisableLegacy {
@@ -765,54 +776,23 @@ func (n *nodeImpl) OnStop() {
 		}
 	}

-// ConfigureRPC makes sure RPC has all the objects it needs to operate.
-func (n *nodeImpl) ConfigureRPC() (*rpccore.Environment, error) {
-	rpcCoreEnv := rpccore.Environment{
-		ProxyAppQuery:    n.proxyApp.Query(),
-		ProxyAppMempool:  n.proxyApp.Mempool(),
-		StateStore:       n.stateStore,
-		BlockStore:       n.blockStore,
-		EvidencePool:     n.evidencePool,
-		ConsensusState:   n.consensusState,
-		P2PPeers:         n.sw,
-		P2PTransport:     n,
-		GenDoc:           n.genesisDoc,
-		EventSinks:       n.eventSinks,
-		ConsensusReactor: n.consensusReactor,
-		EventBus:         n.eventBus,
-		Mempool:          n.mempool,
-		Logger:           n.Logger.With("module", "rpc"),
-		Config:           *n.config.RPC,
-	}
-
+func (n *nodeImpl) startRPC() ([]net.Listener, error) {
 	if n.config.Mode == cfg.ModeValidator {
 		pubKey, err := n.privValidator.GetPubKey(context.TODO())
 		if pubKey == nil || err != nil {
 			return nil, fmt.Errorf("can't get pubkey: %w", err)
 		}
-		rpcCoreEnv.PubKey = pubKey
+		n.rpcEnv.PubKey = pubKey
 	}
-	if err := rpcCoreEnv.InitGenesisChunks(); err != nil {
-		return nil, err
-	}
-	return &rpcCoreEnv, nil
-}

-func (n *nodeImpl) startRPC() ([]net.Listener, error) {
-	env, err := n.ConfigureRPC()
-	if err != nil {
+	if err := n.rpcEnv.InitGenesisChunks(); err != nil {
 		return nil, err
 	}

 	listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
-	routes := env.GetRoutes()
+	routes := n.rpcEnv.GetRoutes()

 	if n.config.RPC.Unsafe {
-		env.AddUnsafe(routes)
+		n.rpcEnv.AddUnsafe(routes)
 	}

 	config := rpcserver.DefaultConfig()
@@ -909,7 +889,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
 			return nil, err
 		}
 		go func() {
-			if err := grpccore.StartGRPCServer(env, listener); err != nil {
+			if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil {
 				n.Logger.Error("Error starting gRPC server", "err", err)
 			}
 		}()
@@ -942,46 +922,16 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server {
 	return srv
 }

-// Switch returns the Node's Switch.
-func (n *nodeImpl) Switch() *p2p.Switch {
-	return n.sw
-}
-
-// BlockStore returns the Node's BlockStore.
-func (n *nodeImpl) BlockStore() *store.BlockStore {
-	return n.blockStore
-}
-
-// ConsensusState returns the Node's ConsensusState.
-func (n *nodeImpl) ConsensusState() *cs.State {
-	return n.consensusState
-}
-
-// ConsensusReactor returns the Node's ConsensusReactor.
-func (n *nodeImpl) ConsensusReactor() *cs.Reactor {
-	return n.consensusReactor
-}
-
 // MempoolReactor returns the Node's mempool reactor.
 func (n *nodeImpl) MempoolReactor() service.Service {
 	return n.mempoolReactor
 }

-// Mempool returns the Node's mempool.
-func (n *nodeImpl) Mempool() mempool.Mempool {
-	return n.mempool
-}
-
-// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
-func (n *nodeImpl) PEXReactor() *pex.Reactor {
-	return n.pexReactor
-}
-
-// EvidencePool returns the Node's EvidencePool.
-func (n *nodeImpl) EvidencePool() *evidence.Pool {
-	return n.evidencePool
-}
-
 // EventBus returns the Node's EventBus.
 func (n *nodeImpl) EventBus() *types.EventBus {
 	return n.eventBus
@@ -998,19 +948,9 @@ func (n *nodeImpl) GenesisDoc() *types.GenesisDoc {
 	return n.genesisDoc
 }

-// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
-func (n *nodeImpl) ProxyApp() proxy.AppConns {
-	return n.proxyApp
-}
-
-// Config returns the Node's config.
-func (n *nodeImpl) Config() *cfg.Config {
-	return n.config
-}
-
-// EventSinks returns the Node's event indexing sinks.
-func (n *nodeImpl) EventSinks() []indexer.EventSink {
-	return n.eventSinks
-}
+// RPCEnvironment returns the Node's RPC environment.
+func (n *nodeImpl) RPCEnvironment() *rpccore.Environment {
+	return n.rpcEnv
+}
//------------------------------------------------------------------------------

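With the individual getters removed, callers reach those components through the node's RPC environment instead. A hedged caller-side sketch (not part of this diff; the field names come from the rpccore.Environment literal in the makeNode hunk above, and useRPCEnv is a hypothetical helper):

// Hypothetical caller-side sketch: components formerly exposed via getters
// are read from the shared RPC environment instead.
func useRPCEnv(n *nodeImpl) {
	env := n.RPCEnvironment()
	_ = env.EvidencePool // replaces the removed n.EvidencePool()
	_ = env.EventSinks   // replaces the removed n.EventSinks()
	_ = env.BlockStore   // replaces the removed n.BlockStore()
}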
node/node_test.go

@@ -509,36 +509,48 @@ func TestNodeSetEventSink(t *testing.T) {
 	config := cfg.ResetTestRoot("node_app_version_test")
 	defer os.RemoveAll(config.RootDir)

-	n := getTestNode(t, config, log.TestingLogger())
+	logger := log.TestingLogger()
+
+	setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink {
+		eventBus, err := createAndStartEventBus(logger)
+		require.NoError(t, err)

-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.KV, n.eventSinks[0].Type())
+		genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
+		require.NoError(t, err)
+
+		indexService, eventSinks, err := createAndStartIndexerService(config, cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
+		return eventSinks
+	}
+
+	eventSinks := setupTest(t, config)
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.KV, eventSinks[0].Type())

 	config.TxIndex.Indexer = []string{"null"}
-	n = getTestNode(t, config, log.TestingLogger())
+	eventSinks = setupTest(t, config)

-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

 	config.TxIndex.Indexer = []string{"null", "kv"}
-	n = getTestNode(t, config, log.TestingLogger())
+	eventSinks = setupTest(t, config)

-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

 	config.TxIndex.Indexer = []string{"kvv"}
-	ns, err := newDefaultNode(config, log.TestingLogger())
+	ns, err := newDefaultNode(config, logger)
 	assert.Nil(t, ns)
 	assert.Equal(t, errors.New("unsupported event sink type"), err)

 	config.TxIndex.Indexer = []string{}
-	n = getTestNode(t, config, log.TestingLogger())
+	eventSinks = setupTest(t, config)

-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())

 	config.TxIndex.Indexer = []string{"psql"}
-	ns, err = newDefaultNode(config, log.TestingLogger())
+	ns, err = newDefaultNode(config, logger)
 	assert.Nil(t, ns)
 	assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
@@ -546,46 +558,46 @@ func TestNodeSetEventSink(t *testing.T) {
 	config.TxIndex.Indexer = []string{"psql"}
 	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-	n.OnStop()
+	eventSinks = setupTest(t, config)
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.PSQL, eventSinks[0].Type())

 	config.TxIndex.Indexer = []string{"psql", "kv"}
 	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 2, len(n.eventSinks))
+	eventSinks = setupTest(t, config)
+	assert.Equal(t, 2, len(eventSinks))
 	// we use a map to filter duplicated sinks, so the order of appended sinks is not guaranteed.
-	if n.eventSinks[0].Type() == indexer.KV {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type())
+	if eventSinks[0].Type() == indexer.KV {
+		assert.Equal(t, indexer.PSQL, eventSinks[1].Type())
 	} else {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-		assert.Equal(t, indexer.KV, n.eventSinks[1].Type())
+		assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
+		assert.Equal(t, indexer.KV, eventSinks[1].Type())
 	}
-	n.OnStop()

 	config.TxIndex.Indexer = []string{"kv", "psql"}
 	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 2, len(n.eventSinks))
-	if n.eventSinks[0].Type() == indexer.KV {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type())
+	eventSinks = setupTest(t, config)
+	assert.Equal(t, 2, len(eventSinks))
+	if eventSinks[0].Type() == indexer.KV {
+		assert.Equal(t, indexer.PSQL, eventSinks[1].Type())
 	} else {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-		assert.Equal(t, indexer.KV, n.eventSinks[1].Type())
+		assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
+		assert.Equal(t, indexer.KV, eventSinks[1].Type())
 	}
-	n.OnStop()

 	var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
 	config.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
 	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, log.TestingLogger())
+	_, err = newDefaultNode(config, logger)
 	require.Error(t, err)
 	assert.Equal(t, e, err)

 	config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
 	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, log.TestingLogger())
+	_, err = newDefaultNode(config, logger)
 	require.Error(t, err)
 	assert.Equal(t, e, err)
 }

node/setup.go

@@ -182,12 +182,12 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL
 	}
 }

-func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
-	if state.Validators.Size() > 1 {
+func onlyValidatorIsUs(genDoc *types.GenesisDoc, pubKey crypto.PubKey) bool {
+	if len(genDoc.Validators) != 1 || pubKey == nil {
 		return false
 	}
-	addr, _ := state.Validators.GetByIndex(0)
-	return pubKey != nil && bytes.Equal(pubKey.Address(), addr)
+	return bytes.Equal(pubKey.Address(), genDoc.Validators[0].Address)
 }

 func createMempoolReactor(
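The state-sync gate in makeNode now keys off the genesis document instead of loaded state (see the stateSync line in the node.go hunk above). A small sketch of the combined check; the wouldStateSync helper is hypothetical:

// wouldStateSync mirrors the makeNode logic: attempt state sync only when
// it is enabled and this node is not the sole genesis validator.
func wouldStateSync(conf *cfg.Config, genDoc *types.GenesisDoc, pubKey crypto.PubKey) bool {
	return conf.StateSync.Enable && !onlyValidatorIsUs(genDoc, pubKey)
}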
@@ -694,7 +694,7 @@ func createPEXReactorV2(
 	logger log.Logger,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
-) (*pex.ReactorV2, error) {
+) (service.Service, error) {
 	channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 4096)
 	if err != nil {