p2p: rename pexV2 to pex (#7088)
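The diff below renames the PEX reactor for the new p2p stack from ReactorV2 to Reactor (and NewReactorV2 to NewReactor) now that it is the only implementation, updates the package documentation and tests to match, and adds Go 1.17 `//go:build` lines alongside the legacy `// +build` constraints.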
@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz
 
 package consensus
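The hunk above (and the four that follow) is the standard Go 1.17 build-constraint migration: gofmt adds a `//go:build` line above each legacy `// +build` comment, and the two must agree. As a hedged illustration (not a file from this commit), a fuzzing entry point gated the same way looks like:

```go
//go:build gofuzz
// +build gofuzz

// Illustrative only: compiled solely when built with `-tags gofuzz`.
package consensus

// Fuzz is the kind of entry point a go-fuzz style harness expects.
func Fuzz(data []byte) int {
	if len(data) == 0 {
		return 0 // uninteresting input; drop it from the corpus
	}
	return 1 // input exercised the code; keep it
}
```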
@@ -1,3 +1,4 @@
+//go:build deadlock
 // +build deadlock
 
 package sync
@@ -1,3 +1,4 @@
+//go:build !deadlock
 // +build !deadlock
 
 package sync
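The `deadlock`/`!deadlock` pair above gates two files that declare the same `sync` package API, so the build tag picks the implementation. A sketch of the tagged side, assuming the commonly used github.com/sasha-s/go-deadlock library (the exact dependency is an assumption here):

```go
//go:build deadlock
// +build deadlock

package sync

import deadlock "github.com/sasha-s/go-deadlock" // assumed library choice

// Mutex detects lock-ordering violations and overly long waits at runtime.
type Mutex struct{ deadlock.Mutex }

// RWMutex is the deadlock-detecting reader/writer lock.
type RWMutex struct{ deadlock.RWMutex }
```

The `!deadlock` twin declares the same `Mutex` and `RWMutex` names around the standard library's `sync` types, so callers never know which build they got.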
@@ -1,3 +1,4 @@
+//go:build go1.10
 // +build go1.10
 
 package conn
@@ -1,3 +1,4 @@
+//go:build !go1.10
 // +build !go1.10
 
 package conn
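The `go1.10`/`!go1.10` pair works the same way but gates on a minimum compiler release rather than a user-supplied tag. A generic sketch, unrelated to what the `conn` files actually contain: a `go1.10` file may freely use `strings.Builder`, which first shipped in Go 1.10:

```go
//go:build go1.10
// +build go1.10

package conn

import "strings"

// joinIDs is a hypothetical helper: it can rely on strings.Builder
// because this file is only compiled by Go 1.10 or newer.
func joinIDs(ids []string) string {
	var b strings.Builder
	for i, id := range ids {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteString(id)
	}
	return b.String()
}
```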
@@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests addresses
 and serves addresses to other peers. There are two versions of this service
 aligning with the two p2p frameworks that Tendermint currently supports.
 
-V1 is coupled with the Switch (which handles peer connections and routing of
-messages) and, alongside exchanging peer information in the form of port/IP
-pairs, also has the responsibility of dialing peers and ensuring that a
-node has a sufficient amount of peers connected.
-
-V2 is embedded with the new p2p stack and uses the peer manager to advertise
+The reactor is embedded with the new p2p stack and uses the peer manager to advertise
 peers as well as add new peers to the peer store. The V2 reactor passes a
 different set of proto messages which include a list of
 [urls](https://golang.org/pkg/net/url/#URL).These can be used to save a set of
 endpoints that each peer uses. The V2 reactor has backwards compatibility with
 V1. It can also handle V1 messages.
 
-The V2 reactor is able to tweak the intensity of it's search by decreasing or
+The reactor is able to tweak the intensity of it's search by decreasing or
 increasing the interval between each request. It tracks connected peers via a
 linked list, sending a request to the node at the front of the list and adding
 it to the back of the list once a response is received. Using this method, a
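The package doc above describes a round-robin rotation: request from the peer at the front of a linked list, re-append it once it responds. A self-contained sketch of that bookkeeping (names are invented for illustration):

```go
package main

import (
	"container/list"
	"fmt"
)

type nodeID string

// peerRotation mirrors the doc's description: requests go to the front
// peer, and a peer rejoins at the back once its response arrives.
type peerRotation struct{ l *list.List }

func newPeerRotation() *peerRotation { return &peerRotation{l: list.New()} }

func (p *peerRotation) add(id nodeID) { p.l.PushBack(id) }

// next removes and returns the front peer, which receives the next request.
func (p *peerRotation) next() (nodeID, bool) {
	front := p.l.Front()
	if front == nil {
		return "", false
	}
	p.l.Remove(front)
	return front.Value.(nodeID), true
}

// onResponse re-appends the peer, so every peer is polled in turn.
func (p *peerRotation) onResponse(id nodeID) { p.l.PushBack(id) }

func main() {
	p := newPeerRotation()
	p.add("a")
	p.add("b")
	id, _ := p.next() // "a" is asked for addresses
	p.onResponse(id)  // once "a" answers, it moves behind "b"
	fmt.Println(id)
}
```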
@@ -17,7 +17,7 @@ import (
 )
 
 var (
-	_ service.Service = (*ReactorV2)(nil)
+	_ service.Service = (*Reactor)(nil)
 	_ p2p.Wrapper     = (*protop2p.PexMessage)(nil)
 )
 
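The `var _ service.Service = (*Reactor)(nil)` line being renamed here is Go's compile-time interface assertion: it costs nothing at runtime but breaks the build if the type drifts away from the interface, which is exactly what keeps a mechanical rename like this one honest. A minimal standalone version:

```go
package main

import "fmt"

type Service interface{ Start() error }

type Reactor struct{}

func (r *Reactor) Start() error { return nil }

// Fails to compile if *Reactor ever stops implementing Service.
var _ Service = (*Reactor)(nil)

func main() { fmt.Println((&Reactor{}).Start()) }
```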
@@ -73,11 +73,6 @@ func ChannelDescriptor() conn.ChannelDescriptor {
 	}
 }
 
-// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor
-// is Reactor.
-//
-// FIXME: Rename this when Reactor is removed, and consider moving to p2p/.
-//
 // The peer exchange or PEX reactor supports the peer manager by sending
 // requests to other peers for addresses that can be given to the peer manager
 // and at the same time advertises addresses to peers that need more.
@@ -86,7 +81,7 @@ func ChannelDescriptor() conn.ChannelDescriptor {
 // increasing the interval between each request. It tracks connected peers via
 // a linked list, sending a request to the node at the front of the list and
 // adding it to the back of the list once a response is received.
-type ReactorV2 struct {
+type Reactor struct {
 	service.BaseService
 
 	peerManager *p2p.PeerManager
@@ -125,14 +120,14 @@ type ReactorV2 struct {
 }
 
 // NewReactor returns a reference to a new reactor.
-func NewReactorV2(
+func NewReactor(
 	logger log.Logger,
 	peerManager *p2p.PeerManager,
 	pexCh *p2p.Channel,
 	peerUpdates *p2p.PeerUpdates,
-) *ReactorV2 {
+) *Reactor {
 
-	r := &ReactorV2{
+	r := &Reactor{
 		peerManager: peerManager,
 		pexCh:       pexCh,
 		peerUpdates: peerUpdates,
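Call sites change mechanically with the constructor, as the test hunks further down show. A condensed sketch of the wiring (import paths are assumed from the Tendermint tree, and the `internal/` packages compile only inside the repo itself):

```go
package pex_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/pex"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// newPexReactor is a hypothetical helper mirroring setupSingle below.
func newPexReactor(t *testing.T, nodeID types.NodeID, pexCh *p2p.Channel, peerUpdates *p2p.PeerUpdates) *pex.Reactor {
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
	require.NoError(t, err)

	r := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
	require.NoError(t, r.Start())
	t.Cleanup(func() { require.NoError(t, r.Stop()) })
	return r
}
```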
@@ -150,7 +145,7 @@ func NewReactorV2(
 // envelopes on each. In addition, it also listens for peer updates and handles
 // messages on that p2p channel accordingly. The caller must be sure to execute
 // OnStop to ensure the outbound p2p Channels are closed.
-func (r *ReactorV2) OnStart() error {
+func (r *Reactor) OnStart() error {
 	go r.processPexCh()
 	go r.processPeerUpdates()
 	return nil
@@ -158,7 +153,7 @@ func (r *ReactorV2) OnStart() error {
 
 // OnStop stops the reactor by signaling to all spawned goroutines to exit and
 // blocking until they all exit.
-func (r *ReactorV2) OnStop() {
+func (r *Reactor) OnStop() {
 	// Close closeCh to signal to all spawned goroutines to gracefully exit. All
 	// p2p Channels should execute Close().
 	close(r.closeCh)
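OnStart spawns the two processing goroutines and OnStop broadcasts shutdown by closing `closeCh`. The general shape, as a runnable stand-in (the WaitGroup is an illustrative detail; the real reactor coordinates shutdown through its channels):

```go
package main

import (
	"fmt"
	"sync"
)

type worker struct {
	closeCh chan struct{}
	wg      sync.WaitGroup
}

func (w *worker) OnStart() {
	w.closeCh = make(chan struct{})
	w.wg.Add(2)
	for i := 0; i < 2; i++ {
		go func() {
			defer w.wg.Done()
			<-w.closeCh // block until OnStop signals exit
		}()
	}
}

func (w *worker) OnStop() {
	close(w.closeCh) // a closed channel unblocks every receiver at once
	w.wg.Wait()      // block until all goroutines have exited
}

func main() {
	w := &worker{}
	w.OnStart()
	w.OnStop()
	fmt.Println("stopped cleanly")
}
```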
@@ -172,7 +167,7 @@ func (r *ReactorV2) OnStop() {
 
 // processPexCh implements a blocking event loop where we listen for p2p
 // Envelope messages from the pexCh.
-func (r *ReactorV2) processPexCh() {
+func (r *Reactor) processPexCh() {
 	defer r.pexCh.Close()
 
 	for {
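The elided body of processPexCh is a select loop multiplexing shutdown, inbound envelopes, and the request timer. A self-contained analog with stand-in types (the real loop dispatches to handleMessage and sendRequestForPeers):

```go
package main

import (
	"fmt"
	"time"
)

type envelope struct{ from, msg string }

func eventLoop(closeCh <-chan struct{}, in <-chan envelope, nextRequest func() <-chan time.Time) {
	for {
		select {
		case <-closeCh:
			return // OnStop was called; deferred cleanup runs in the caller
		case env := <-in:
			fmt.Println("handle message from", env.from) // handleMessage(...)
		case <-nextRequest():
			fmt.Println("requesting more peers") // sendRequestForPeers()
		}
	}
}

func main() {
	closeCh := make(chan struct{})
	in := make(chan envelope, 1)
	in <- envelope{from: "peer1", msg: "pexResponse"}
	go func() { time.Sleep(10 * time.Millisecond); close(closeCh) }()
	eventLoop(closeCh, in, func() <-chan time.Time { return time.After(time.Hour) })
}
```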
@@ -202,7 +197,7 @@ func (r *ReactorV2) processPexCh() {
 // processPeerUpdates initiates a blocking process where we listen for and handle
 // PeerUpdate messages. When the reactor is stopped, we will catch the signal and
 // close the p2p PeerUpdatesCh gracefully.
-func (r *ReactorV2) processPeerUpdates() {
+func (r *Reactor) processPeerUpdates() {
 	defer r.peerUpdates.Close()
 
 	for {
@@ -218,7 +213,7 @@ func (r *ReactorV2) processPeerUpdates() {
 }
 
 // handlePexMessage handles envelopes sent from peers on the PexChannel.
-func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error {
+func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error {
 	logger := r.Logger.With("peer", envelope.From)
 
 	switch msg := envelope.Message.(type) {
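handlePexMessage dispatches on the concrete protobuf type of the envelope payload. A simplified, runnable analog of that type switch (the message structs here are stand-ins for the generated PexRequest/PexResponse types):

```go
package main

import (
	"errors"
	"fmt"
)

type pexRequest struct{}
type pexResponse struct{ addresses []string }

func handlePexMessage(msg interface{}) error {
	switch m := msg.(type) {
	case *pexRequest:
		fmt.Println("got a request; advertise some addresses back")
		return nil
	case *pexResponse:
		fmt.Printf("got %d addresses for the peer manager\n", len(m.addresses))
		return nil
	default:
		return errors.New("received unknown message type")
	}
}

func main() {
	_ = handlePexMessage(&pexRequest{})
	_ = handlePexMessage(&pexResponse{addresses: []string{"1.2.3.4:26656"}})
}
```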
@@ -337,7 +332,7 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error {
 //
 // FIXME: We may want to cache and parallelize this, but for now we'll just rely
 // on the operating system to cache it for us.
-func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
+func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
 	limit := len(addresses)
 	pexAddresses := make([]protop2p.PexAddress, 0, limit)
 
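Per the FIXME above, resolve leans on the OS resolver (and its cache) to turn name-based node addresses into IP/port endpoints. Conceptually, with a simplified stand-in for the protobuf PexAddress:

```go
package main

import (
	"fmt"
	"net"
)

type pexAddress struct {
	IP   string
	Port uint16
}

// resolve is a sketch: one DNS lookup per host, fanning each returned IP
// out into an advertisable address. The real method iterates NodeAddress
// values and caps its result at len(addresses).
func resolve(host string, port uint16) ([]pexAddress, error) {
	ips, err := net.LookupIP(host) // served by the OS resolver cache when warm
	if err != nil {
		return nil, err
	}
	addrs := make([]pexAddress, 0, len(ips))
	for _, ip := range ips {
		addrs = append(addrs, pexAddress{IP: ip.String(), Port: port})
	}
	return addrs, nil
}

func main() {
	addrs, err := resolve("localhost", 26656)
	fmt.Println(addrs, err)
}
```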
@@ -380,7 +375,7 @@ func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
 // handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
 // It will handle errors and any possible panics gracefully. A caller can handle
 // any error returned by sending a PeerError on the respective channel.
-func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
+func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
 	defer func() {
 		if e := recover(); e != nil {
 			err = fmt.Errorf("panic in processing message: %v", e)
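The named `err` return plus the deferred `recover()` shown above is the standard Go idiom for turning a panic in a handler into an ordinary error. Stripped down:

```go
package main

import "fmt"

func handle(f func()) (err error) {
	defer func() {
		if e := recover(); e != nil {
			// overwrite the named return; the caller sees an error, not a crash
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	f()
	return nil
}

func main() {
	err := handle(func() { panic("malformed envelope") })
	fmt.Println(err) // panic in processing message: malformed envelope
}
```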
@@ -407,7 +402,7 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
 
 // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we
 // send a request for addresses.
-func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
+func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
 	r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
 
 	r.mtx.Lock()
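processPeerUpdate reacts to peer status changes under the mutex; per the comment, a PeerStatusUp makes the peer eligible for an address request. A runnable stand-in for the status dispatch (field and constant names are simplified):

```go
package main

import "fmt"

type peerStatus int

const (
	peerStatusUp peerStatus = iota
	peerStatusDown
)

type reactor struct {
	availablePeers map[string]struct{}
}

func (r *reactor) processPeerUpdate(nodeID string, status peerStatus) {
	switch status {
	case peerStatusUp:
		r.availablePeers[nodeID] = struct{}{} // now eligible for an address request
	case peerStatusDown:
		delete(r.availablePeers, nodeID)
	}
}

func main() {
	r := &reactor{availablePeers: make(map[string]struct{})}
	r.processPeerUpdate("peer1", peerStatusUp)
	fmt.Println(len(r.availablePeers)) // 1
}
```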
@@ -424,7 +419,7 @@ func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
 	}
 }
 
-func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time {
+func (r *Reactor) waitUntilNextRequest() <-chan time.Time {
 	return time.After(time.Until(r.nextRequestTime))
 }
 
@@ -432,7 +427,7 @@ func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time {
 // peer a request for more peer addresses. The function then moves the
 // peer into the requestsSent bucket and calculates when the next request
 // time should be
-func (r *ReactorV2) sendRequestForPeers() {
+func (r *Reactor) sendRequestForPeers() {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	if len(r.availablePeers) == 0 {
@@ -480,7 +475,7 @@ func (r *ReactorV2) sendRequestForPeers() {
 // new nodes will plummet to a very small number, meaning the interval expands
 // to its upper bound.
 // CONTRACT: Must use a write lock as nextRequestTime is updated
-func (r *ReactorV2) calculateNextRequestTime() {
+func (r *Reactor) calculateNextRequestTime() {
 	// check if the peer store is full. If so then there is no need
 	// to send peer requests too often
 	if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
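calculateNextRequestTime stretches the polling interval as the peer store fills (the 0.95 ratio check above) and as discovery of new addresses dries up. A back-of-envelope version; the ten-minute ceiling and the linear discoveryRatio weighting are assumptions, not the exact formula:

```go
package main

import (
	"fmt"
	"time"
)

func nextRequestDelay(peerRatio, discoveryRatio float64, baseTime time.Duration) time.Duration {
	if peerRatio >= 0.95 {
		return 10 * time.Minute // store is nearly full: no need to ask often
	}
	// fewer newly discovered peers => larger ratio => longer wait
	return time.Duration(float64(baseTime) * discoveryRatio)
}

func main() {
	fmt.Println(nextRequestDelay(0.99, 1, 10*time.Second)) // 10m0s
	fmt.Println(nextRequestDelay(0.50, 3, 10*time.Second)) // 30s
}
```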
@@ -516,7 +511,7 @@ func (r *ReactorV2) calculateNextRequestTime() {
 	r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio))
 }
 
-func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
+func (r *Reactor) markPeerRequest(peer types.NodeID) error {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
@@ -529,7 +524,7 @@ func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
 	return nil
 }
 
-func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
+func (r *Reactor) markPeerResponse(peer types.NodeID) error {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	// check if a request to this peer was sent
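markPeerRequest and markPeerResponse are the two halves of the reactor's bookkeeping: throttle peers that ask too often, and only accept responses matching an outstanding request. A self-contained sketch (the minimum interval is an assumed value, and the real maps are keyed by types.NodeID):

```go
package main

import (
	"fmt"
	"time"
)

const minReceiveRequestInterval = 100 * time.Millisecond // assumption

type reactor struct {
	lastReceivedRequests map[string]time.Time
	requestsSent         map[string]struct{}
}

func (r *reactor) markPeerRequest(peer string) error {
	if last, ok := r.lastReceivedRequests[peer]; ok && time.Since(last) < minReceiveRequestInterval {
		return fmt.Errorf("peer %s sent requests too often", peer)
	}
	r.lastReceivedRequests[peer] = time.Now()
	return nil
}

func (r *reactor) markPeerResponse(peer string) error {
	if _, ok := r.requestsSent[peer]; !ok {
		return fmt.Errorf("peer %s sent a response without a matching request", peer)
	}
	delete(r.requestsSent, peer) // the response settles the outstanding request
	return nil
}

func main() {
	r := &reactor{
		lastReceivedRequests: make(map[string]time.Time),
		requestsSent:         map[string]struct{}{"peer1": {}},
	}
	fmt.Println(r.markPeerRequest("peer1"), r.markPeerResponse("peer1")) // <nil> <nil>
}
```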
@@ -546,7 +541,7 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
 
 // all addresses must use a MCONN protocol for the peer to be considered part of the
 // legacy p2p pex system
-func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool {
+func (r *Reactor) isLegacyPeer(peer types.NodeID) bool {
 	for _, addr := range r.peerManager.Addresses(peer) {
 		if addr.Protocol != p2p.MConnProtocol {
 			return false
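The hunk cuts off before isLegacyPeer's closing lines, but the shape is clear from the comment: every address must speak MConn or the peer is not treated as legacy. A completed, self-contained version with string protocols standing in for p2p.Protocol:

```go
package main

import "fmt"

const mConnProtocol = "mconn"

func isLegacyPeer(addressProtocols []string) bool {
	for _, proto := range addressProtocols {
		if proto != mConnProtocol {
			return false // any non-MConn address disqualifies the peer
		}
	}
	return true // all addresses use MConn
}

func main() {
	fmt.Println(isLegacyPeer([]string{"mconn", "mconn"}))  // true
	fmt.Println(isLegacyPeer([]string{"mconn", "memory"})) // false
}
```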
@@ -272,7 +272,7 @@ func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) {
 }
 
 type singleTestReactor struct {
-	reactor  *pex.ReactorV2
+	reactor  *pex.Reactor
 	pexInCh  chan p2p.Envelope
 	pexOutCh chan p2p.Envelope
 	pexErrCh chan p2p.PeerError
@@ -301,7 +301,7 @@ func setupSingle(t *testing.T) *singleTestReactor {
 	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
 	require.NoError(t, err)
 
-	reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)
+	reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
 	require.NoError(t, reactor.Start())
 	t.Cleanup(func() {
 		err := reactor.Stop()
@@ -327,7 +327,7 @@ type reactorTestSuite struct {
 	network *p2ptest.Network
 	logger  log.Logger
 
-	reactors    map[types.NodeID]*pex.ReactorV2
+	reactors    map[types.NodeID]*pex.Reactor
 	pexChannels map[types.NodeID]*p2p.Channel
 
 	peerChans   map[types.NodeID]chan p2p.PeerUpdate
@@ -370,7 +370,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 	rts := &reactorTestSuite{
 		logger:      log.TestingLogger().With("testCase", t.Name()),
 		network:     p2ptest.MakeNetwork(t, networkOpts),
-		reactors:    make(map[types.NodeID]*pex.ReactorV2, realNodes),
+		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
 		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
 		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
 		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
@@ -394,7 +394,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 		if idx < opts.MockNodes {
 			rts.mocks = append(rts.mocks, nodeID)
 		} else {
-			rts.reactors[nodeID] = pex.NewReactorV2(
+			rts.reactors[nodeID] = pex.NewReactor(
 				rts.logger.With("nodeID", nodeID),
 				rts.network.Nodes[nodeID].PeerManager,
 				rts.pexChannels[nodeID],
@@ -452,7 +452,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
 		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
 		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
 		r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
-		r.reactors[nodeID] = pex.NewReactorV2(
+		r.reactors[nodeID] = pex.NewReactor(
 			r.logger.With("nodeID", nodeID),
 			r.network.Nodes[nodeID].PeerManager,
 			r.pexChannels[nodeID],