From 4b8fd2814841d2102b3454cdba59a09e7c9028c2 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 10:27:49 -0500 Subject: [PATCH 01/33] ci: fix missing dependency (#7396) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f119ec04c..da51f0395 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,7 +74,7 @@ jobs: go.mod go.sum - name: install - run: make install + run: make install install_abci if: "env.GIT_DIFF != ''" - name: test_apps run: test/app/test.sh From 6b35cc1a47a4808fa18fb1f132da11877613a776 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 10:40:07 -0500 Subject: [PATCH 02/33] p2p: remove unneeded close channels from p2p layer (#7392) --- internal/consensus/reactor.go | 4 +- internal/p2p/conn/connection.go | 38 ++++---- internal/p2p/conn/connection_test.go | 118 ++++++++++++++++------- internal/p2p/mocks/connection.go | 28 +++--- internal/p2p/mocks/transport.go | 14 +-- internal/p2p/p2ptest/network.go | 13 ++- internal/p2p/peermanager.go | 50 ++++------ internal/p2p/peermanager_scoring_test.go | 5 +- internal/p2p/peermanager_test.go | 10 -- internal/p2p/pex/reactor.go | 18 +--- internal/p2p/router.go | 28 +++--- internal/p2p/router_test.go | 60 +++++------- internal/p2p/transport.go | 6 +- internal/p2p/transport_mconn.go | 43 +++++---- internal/p2p/transport_mconn_test.go | 10 +- internal/p2p/transport_memory.go | 48 +++++---- internal/p2p/transport_test.go | 39 ++++---- 17 files changed, 264 insertions(+), 268 deletions(-) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 803834d96..d0d625e26 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1470,7 +1470,7 @@ func (r *Reactor) peerStatsRoutine(ctx context.Context) { switch msg.Msg.(type) { case *VoteMessage: if numVotes := ps.RecordVote(); 
numVotes%votesToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + r.peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) @@ -1478,7 +1478,7 @@ func (r *Reactor) peerStatsRoutine(ctx context.Context) { case *BlockPartMessage: if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + r.peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index a2808f216..fa21358c1 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -49,8 +49,8 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID ChannelID, msgBytes []byte) -type errorCbFunc func(interface{}) +type receiveCbFunc func(ctx context.Context, chID ChannelID, msgBytes []byte) +type errorCbFunc func(context.Context, interface{}) /* Each peer has one `MConnection` (multiplex connection) instance. @@ -286,21 +286,21 @@ func (c *MConnection) flush() { } // Catch panics, usually caused by remote disconnects. -func (c *MConnection) _recover() { +func (c *MConnection) _recover(ctx context.Context) { if r := recover(); r != nil { c.logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) - c.stopForError(fmt.Errorf("recovered from panic: %v", r)) + c.stopForError(ctx, fmt.Errorf("recovered from panic: %v", r)) } } -func (c *MConnection) stopForError(r interface{}) { +func (c *MConnection) stopForError(ctx context.Context, r interface{}) { if err := c.Stop(); err != nil { c.logger.Error("Error stopping connection", "err", err) } if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { if c.onError != nil { - c.onError(r) + c.onError(ctx, r) } } } @@ -335,7 +335,7 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { // sendRoutine polls for packets to send from channels. 
func (c *MConnection) sendRoutine(ctx context.Context) { - defer c._recover() + defer c._recover(ctx) protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter) FOR_LOOP: @@ -390,7 +390,7 @@ FOR_LOOP: break FOR_LOOP case <-c.send: // Send some PacketMsgs - eof := c.sendSomePacketMsgs() + eof := c.sendSomePacketMsgs(ctx) if !eof { // Keep sendRoutine awake. select { @@ -405,7 +405,7 @@ FOR_LOOP: } if err != nil { c.logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) - c.stopForError(err) + c.stopForError(ctx, err) break FOR_LOOP } } @@ -417,7 +417,7 @@ FOR_LOOP: // Returns true if messages from channels were exhausted. // Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomePacketMsgs() bool { +func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. @@ -425,7 +425,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { + if c.sendPacketMsg(ctx) { return true } } @@ -433,7 +433,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { } // Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { +func (c *MConnection) sendPacketMsg(ctx context.Context) bool { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 @@ -461,7 +461,7 @@ func (c *MConnection) sendPacketMsg() bool { _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) if err != nil { c.logger.Error("Failed to write PacketMsg", "err", err) - c.stopForError(err) + c.stopForError(ctx, err) return true } c.sendMonitor.Update(_n) @@ -474,7 +474,7 @@ func (c *MConnection) sendPacketMsg() bool { // Blocks depending on how the connection is throttled. 
// Otherwise, it never blocks. func (c *MConnection) recvRoutine(ctx context.Context) { - defer c._recover() + defer c._recover(ctx) protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize) @@ -518,7 +518,7 @@ FOR_LOOP: } else { c.logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) } - c.stopForError(err) + c.stopForError(ctx, err) } break FOR_LOOP } @@ -547,7 +547,7 @@ FOR_LOOP: if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil { err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.stopForError(ctx, err) break FOR_LOOP } @@ -555,19 +555,19 @@ FOR_LOOP: if err != nil { if c.IsRunning() { c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.stopForError(ctx, err) } break FOR_LOOP } if msgBytes != nil { c.logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes) // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine - c.onReceive(channelID, msgBytes) + c.onReceive(ctx, channelID, msgBytes) } default: err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet)) c.logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.stopForError(ctx, err) break FOR_LOOP } } diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index f1b2ae24c..0700db1b0 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -25,18 +25,18 @@ const maxPingPongPacketSize = 1024 // bytes func createTestMConnection(logger log.Logger, conn net.Conn) *MConnection { return createMConnectionWithCallbacks(logger, conn, // onRecieve - func(chID ChannelID, msgBytes []byte) { + func(ctx context.Context, chID ChannelID, msgBytes []byte) { }, // onError - func(r interface{}) { + func(ctx 
context.Context, r interface{}) { }) } func createMConnectionWithCallbacks( logger log.Logger, conn net.Conn, - onReceive func(chID ChannelID, msgBytes []byte), - onError func(r interface{}), + onReceive func(ctx context.Context, chID ChannelID, msgBytes []byte), + onError func(ctx context.Context, r interface{}), ) *MConnection { cfg := DefaultMConnConfig() cfg.PingInterval = 90 * time.Millisecond @@ -120,11 +120,17 @@ func TestMConnectionReceive(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } logger := log.TestingLogger() @@ -160,11 +166,17 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } ctx, cancel := context.WithCancel(context.Background()) @@ -202,12 +214,19 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := 
func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -259,11 +278,17 @@ func TestMConnectionMultiplePings(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -309,11 +334,17 @@ func TestMConnectionPingPongs(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } ctx, cancel := context.WithCancel(context.Background()) @@ -370,11 +401,17 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -406,8 +443,8 @@ func 
newClientAndServerConnsForReadErrors( ) (*MConnection, *MConnection) { server, client := NetPipe() - onReceive := func(chID ChannelID, msgBytes []byte) {} - onError := func(r interface{}) {} + onReceive := func(context.Context, ChannelID, []byte) {} + onError := func(context.Context, interface{}) {} // create client conn with two channels chDescs := []*ChannelDescriptor{ @@ -423,8 +460,11 @@ func newClientAndServerConnsForReadErrors( // create server conn with 1 channel // it fires on chOnErr when there's an error serverLogger := logger.With("module", "server") - onError = func(r interface{}) { - chOnErr <- struct{}{} + onError = func(ctx context.Context, r interface{}) { + select { + case <-ctx.Done(): + case chOnErr <- struct{}{}: + } } mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) @@ -488,8 +528,11 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(waitAll(mconnClient, mconnServer)) - mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := mconnClient.conn @@ -590,8 +633,11 @@ func TestMConnectionChannelOverflow(t *testing.T) { mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(waitAll(mconnClient, mconnServer)) - mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := mconnClient.conn diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 65b9afafb..576fb2386 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -79,20 +79,20 @@ 
func (_m *Connection) LocalEndpoint() p2p.Endpoint { return r0 } -// ReceiveMessage provides a mock function with given fields: -func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { - ret := _m.Called() +// ReceiveMessage provides a mock function with given fields: _a0 +func (_m *Connection) ReceiveMessage(_a0 context.Context) (conn.ChannelID, []byte, error) { + ret := _m.Called(_a0) var r0 conn.ChannelID - if rf, ok := ret.Get(0).(func() conn.ChannelID); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) conn.ChannelID); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(conn.ChannelID) } var r1 []byte - if rf, ok := ret.Get(1).(func() []byte); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) []byte); ok { + r1 = rf(_a0) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]byte) @@ -100,8 +100,8 @@ func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { } var r2 error - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) } else { r2 = ret.Error(2) } @@ -123,13 +123,13 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { return r0 } -// SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 conn.ChannelID, _a1 []byte) error { - ret := _m.Called(_a0, _a1) +// SendMessage provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Connection) SendMessage(_a0 context.Context, _a1 conn.ChannelID, _a2 []byte) error { + ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(conn.ChannelID, []byte) error); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, conn.ChannelID, []byte) error); ok { + r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) } diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index eea1de4c5..b17290118 100644 --- a/internal/p2p/mocks/transport.go +++ 
b/internal/p2p/mocks/transport.go @@ -17,13 +17,13 @@ type Transport struct { mock.Mock } -// Accept provides a mock function with given fields: -func (_m *Transport) Accept() (p2p.Connection, error) { - ret := _m.Called() +// Accept provides a mock function with given fields: _a0 +func (_m *Transport) Accept(_a0 context.Context) (p2p.Connection, error) { + ret := _m.Called(_a0) var r0 p2p.Connection - if rf, ok := ret.Get(0).(func() p2p.Connection); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) p2p.Connection); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(p2p.Connection) @@ -31,8 +31,8 @@ func (_m *Transport) Accept() (p2p.Connection, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 6fc5d7c11..30f1a435f 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -24,6 +24,7 @@ type Network struct { logger log.Logger memoryNetwork *p2p.MemoryNetwork + cancel context.CancelFunc } // NetworkOptions is an argument structure to parameterize the @@ -68,6 +69,9 @@ func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Networ // addition to creating a peer update subscription for each node. Finally, all // nodes are connected to each other. func (n *Network) Start(ctx context.Context, t *testing.T) { + ctx, n.cancel = context.WithCancel(ctx) + t.Cleanup(n.cancel) + // Set up a list of node addresses to dial, and a peer update subscription // for each node. 
dialQueue := []p2p.NodeAddress{} @@ -200,10 +204,10 @@ func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) { } require.NoError(t, node.Transport.Close()) + node.cancel() if node.Router.IsRunning() { require.NoError(t, node.Router.Stop()) } - node.PeerManager.Close() for _, sub := range subs { RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -222,12 +226,16 @@ type Node struct { Router *p2p.Router PeerManager *p2p.PeerManager Transport *p2p.MemoryTransport + + cancel context.CancelFunc } // MakeNode creates a new Node configured for the network with a // running peer manager, but does not add it to the existing // network. Callers are responsible for updating peering relationships. func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node { + ctx, cancel := context.WithCancel(ctx) + privKey := ed25519.GenPrivKey() nodeID := types.NodeIDFromPubKey(privKey.PubKey()) nodeInfo := types.NodeInfo{ @@ -267,8 +275,8 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) if router.IsRunning() { require.NoError(t, router.Stop()) } - peerManager.Close() require.NoError(t, transport.Close()) + cancel() }) return &Node{ @@ -279,6 +287,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) Router: router, PeerManager: peerManager, Transport: transport, + cancel: cancel, } } diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 0ab0128ca..40dcf8464 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -56,8 +56,8 @@ type PeerUpdate struct { type PeerUpdates struct { routerUpdatesCh chan PeerUpdate reactorUpdatesCh chan PeerUpdate - closeCh chan struct{} closeOnce sync.Once + doneCh chan struct{} } // NewPeerUpdates creates a new PeerUpdates subscription. 
It is primarily for @@ -67,7 +67,7 @@ func NewPeerUpdates(updatesCh chan PeerUpdate, buf int) *PeerUpdates { return &PeerUpdates{ reactorUpdatesCh: updatesCh, routerUpdatesCh: make(chan PeerUpdate, buf), - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), } } @@ -76,28 +76,28 @@ func (pu *PeerUpdates) Updates() <-chan PeerUpdate { return pu.reactorUpdatesCh } -// SendUpdate pushes information about a peer into the routing layer, -// presumably from a peer. -func (pu *PeerUpdates) SendUpdate(update PeerUpdate) { - select { - case <-pu.closeCh: - case pu.routerUpdatesCh <- update: - } +// Done returns a channel that is closed when the subscription is closed. +func (pu *PeerUpdates) Done() <-chan struct{} { + return pu.doneCh } // Close closes the peer updates subscription. func (pu *PeerUpdates) Close() { pu.closeOnce.Do(func() { // NOTE: We don't close updatesCh since multiple goroutines may be - // sending on it. The PeerManager senders will select on closeCh as well + // sending on it. The PeerManager senders will select on doneCh as well // to avoid blocking on a closed subscription. - close(pu.closeCh) + close(pu.doneCh) }) } -// Done returns a channel that is closed when the subscription is closed. -func (pu *PeerUpdates) Done() <-chan struct{} { - return pu.closeCh +// SendUpdate pushes information about a peer into the routing layer, +// presumably from a peer. +func (pu *PeerUpdates) SendUpdate(ctx context.Context, update PeerUpdate) { + select { + case <-ctx.Done(): + case pu.routerUpdatesCh <- update: + } } // PeerManagerOptions specifies options for a PeerManager. 
@@ -276,8 +276,6 @@ type PeerManager struct { rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes - closeCh chan struct{} // signal channel for Close() - closeOnce sync.Once mtx sync.Mutex store *peerStore @@ -312,7 +310,6 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), - closeCh: make(chan struct{}), store: store, dialing: map[types.NodeID]bool{}, @@ -552,7 +549,6 @@ func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error select { case <-timer.C: m.dialWaker.Wake() - case <-m.closeCh: case <-ctx.Done(): } }() @@ -864,10 +860,6 @@ func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { go func() { for { select { - case <-peerUpdates.closeCh: - return - case <-m.closeCh: - return case <-ctx.Done(): return case pu := <-peerUpdates.routerUpdatesCh: @@ -882,7 +874,6 @@ func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { m.mtx.Lock() delete(m.subscriptions, peerUpdates) m.mtx.Unlock() - case <-m.closeCh: case <-ctx.Done(): } }() @@ -913,27 +904,20 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) { // maintaining order if this is a problem. func (m *PeerManager) broadcast(peerUpdate PeerUpdate) { for _, sub := range m.subscriptions { - // We have to check closeCh separately first, otherwise there's a 50% + // We have to check doneChan separately first, otherwise there's a 50% // chance the second select will send on a closed subscription. select { - case <-sub.closeCh: + case <-sub.doneCh: continue default: } select { case sub.reactorUpdatesCh <- peerUpdate: - case <-sub.closeCh: + case <-sub.doneCh: } } } -// Close closes the peer manager, releasing resources (i.e. goroutines). 
-func (m *PeerManager) Close() { - m.closeOnce.Do(func() { - close(m.closeCh) - }) -} - // Addresses returns all known addresses for a peer, primarily for testing. // The order is arbitrary. func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress { diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index fe23767c4..ecaf71c98 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -22,7 +22,6 @@ func TestPeerScoring(t *testing.T) { db := dbm.NewMemDB() peerManager, err := NewPeerManager(selfID, db, PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() // create a fake node id := types.NodeID(strings.Repeat("a1", 20)) @@ -59,7 +58,7 @@ func TestPeerScoring(t *testing.T) { start := peerManager.Scores()[id] pu := peerManager.Subscribe(ctx) defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -73,7 +72,7 @@ func TestPeerScoring(t *testing.T) { start := peerManager.Scores()[id] pu := peerManager.Subscribe(ctx) defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, }) diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index cf1b0707e..dec92dab0 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -154,7 +154,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1}, }) require.NoError(t, err) - defer peerManager.Close() for _, addr := range append(append(aAddresses, bAddresses...), cAddresses...) 
{ added, err := peerManager.Add(addr) @@ -171,8 +170,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { cID: 0, }, peerManager.Scores()) - peerManager.Close() - // Creating a new peer manager with the same database should retain the // peers, but they should have updated scores from the new PersistentPeers // configuration. @@ -181,7 +178,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID)) require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID)) @@ -208,7 +204,6 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) { require.NoError(t, err) require.True(t, added) require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - peerManager.Close() // If we change our selfID to one of the peers in the peer store, it // should be removed from the store. @@ -1755,9 +1750,6 @@ func TestPeerManager_Close(t *testing.T) { require.NoError(t, err) require.Equal(t, a, dial) require.NoError(t, peerManager.DialFailed(ctx, a)) - - // This should clean up the goroutines. - peerManager.Close() } func TestPeerManager_Advertise(t *testing.T) { @@ -1780,7 +1772,6 @@ func TestPeerManager_Advertise(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(aTCP) require.NoError(t, err) @@ -1847,7 +1838,6 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) { require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) // The heights should not be persisted. 
- peerManager.Close() peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) require.NoError(t, err) diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 69ff5206c..b42bb2f4b 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -83,7 +83,6 @@ type Reactor struct { peerManager *p2p.PeerManager pexCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // list of available peers to loop through and send peer requests to availablePeers map[types.NodeID]struct{} @@ -128,7 +127,6 @@ func NewReactor( peerManager: peerManager, pexCh: pexCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), availablePeers: make(map[types.NodeID]struct{}), requestsSent: make(map[types.NodeID]struct{}), lastReceivedRequests: make(map[types.NodeID]time.Time), @@ -150,13 +148,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *Reactor) OnStop() { - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - <-r.peerUpdates.Done() -} +func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. @@ -168,8 +160,6 @@ func (r *Reactor) processPexCh(ctx context.Context) { select { case <-ctx.Done(): - return - case <-r.closeCh: r.logger.Debug("stopped listening on PEX channel; closing...") return @@ -196,17 +186,13 @@ func (r *Reactor) processPexCh(ctx context.Context) { // close the p2p PeerUpdatesCh gracefully. 
func (r *Reactor) processPeerUpdates(ctx context.Context) { defer r.peerUpdates.Close() - for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on peer updates channel; closing...") return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.logger.Debug("stopped listening on peer updates channel; closing...") - return } } } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 7d1529ace..8f751ec6a 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -158,7 +158,6 @@ type Router struct { endpoints []Endpoint connTracker connectionTracker protocolTransports map[Protocol]Transport - stopCh chan struct{} // signals Router shutdown peerMtx sync.RWMutex peerQueues map[types.NodeID]queue // outbound messages per peer for all channels @@ -208,7 +207,6 @@ func NewRouter( protocolTransports: map[Protocol]Transport{}, peerManager: peerManager, options: options, - stopCh: make(chan struct{}), channelQueues: map[ChannelID]queue{}, channelMessages: map[ChannelID]proto.Message{}, peerQueues: map[types.NodeID]queue{}, @@ -399,7 +397,7 @@ func (r *Router) routeChannel( case <-q.closed(): r.logger.Debug("dropping message for unconnected peer", "peer", envelope.To, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return } } @@ -414,8 +412,6 @@ func (r *Router) routeChannel( r.peerManager.Errored(peerError.NodeID, peerError.Err) case <-ctx.Done(): return - case <-r.stopCh: - return } } } @@ -474,7 +470,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) { r.logger.Debug("starting accept routine", "transport", transport) for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) switch err { case nil: case io.EOF: @@ -783,14 +779,14 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec go func() { select { - case errCh <- r.receivePeer(peerID, conn): + case errCh <- r.receivePeer(ctx, peerID, conn): case 
<-ctx.Done(): } }() go func() { select { - case errCh <- r.sendPeer(peerID, conn, sendQueue): + case errCh <- r.sendPeer(ctx, peerID, conn, sendQueue): case <-ctx.Done(): } }() @@ -829,9 +825,9 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec // receivePeer receives inbound messages from a peer, deserializes them and // passes them on to the appropriate channel. -func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { +func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Connection) error { for { - chID, bz, err := conn.ReceiveMessage() + chID, bz, err := conn.ReceiveMessage(ctx) if err != nil { return err } @@ -874,14 +870,14 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { case <-queue.closed(): r.logger.Debug("channel closed, dropping message", "peer", peerID, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return nil } } } // sendPeer sends queued messages to a peer. -func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) error { +func (r *Router) sendPeer(ctx context.Context, peerID types.NodeID, conn Connection, peerQueue queue) error { for { start := time.Now().UTC() @@ -899,7 +895,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) continue } - if err = conn.SendMessage(envelope.channelID, bz); err != nil { + if err = conn.SendMessage(ctx, envelope.channelID, bz); err != nil { return err } @@ -908,7 +904,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) case <-peerQueue.closed(): return nil - case <-r.stopCh: + case <-ctx.Done(): return nil } } @@ -983,9 +979,6 @@ func (r *Router) OnStart(ctx context.Context) error { // here, since that would cause any reactor senders to panic, so it is the // sender's responsibility. func (r *Router) OnStop() { - // Signal router shutdown. - close(r.stopCh) - // Close transport listeners (unblocks Accept calls). 
for _, transport := range r.transports { if err := transport.Close(); err != nil { @@ -1009,6 +1002,7 @@ func (r *Router) OnStop() { r.peerMtx.RUnlock() for _, q := range queues { + q.close() <-q.closed() } } diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 8a4c9e4bc..a561f68cd 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -106,7 +106,6 @@ func TestRouter_Channel_Basic(t *testing.T) { // Set up a router with no transports (so no peers). peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( ctx, @@ -392,25 +391,22 @@ func TestRouter_AcceptPeers(t *testing.T) { mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() sub := peerManager.Subscribe(ctx) defer sub.Close() @@ -464,13 +460,12 @@ func TestRouter_AcceptPeers_Error(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, errors.New("boom")) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom")) mockTransport.On("Close").Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( ctx, @@ -503,13 +498,12 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) mockTransport.On("Close").Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( ctx, @@ -554,15 +548,14 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Times(3).Run(func(_ mock.Arguments) { + mockTransport.On("Accept", mock.Anything).Times(3).Run(func(_ mock.Arguments) { acceptCh <- true }).Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( ctx, @@ -580,7 +573,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { require.Eventually(t, func() bool { return len(acceptCh) == 3 - }, time.Second, 10*time.Millisecond) + }, time.Second, 10*time.Millisecond, "num", len(acceptCh)) close(closeCh) time.Sleep(100 * time.Millisecond) @@ -636,19 +629,17 @@ func TestRouter_DialPeers(t *testing.T) { if tc.dialErr == nil { mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() } if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) if tc.dialErr == nil { mockTransport.On("Dial", mock.Anything, endpoint).Once().Return(mockConnection, nil) // This handles the retry when a dialed connection gets closed after ReceiveMessage @@ -663,7 +654,6 @@ func TestRouter_DialPeers(t *testing.T) { // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(address) require.NoError(t, err) @@ -734,7 +724,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) for _, address := range []p2p.NodeAddress{a, b, c} { endpoint := p2p.Endpoint{Protocol: address.Protocol, Path: string(address.NodeID)} mockTransport.On("Dial", mock.Anything, endpoint).Run(func(_ mock.Arguments) { @@ -745,7 +735,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(a) require.NoError(t, err) @@ -813,7 +802,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). 
Return(peerInfo, peerKey.PubKey(), nil) - mockConnection.On("ReceiveMessage").WaitUntil(closeCh).Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Run(func(_ mock.Arguments) { closeOnce.Do(func() { @@ -825,13 +814,12 @@ func TestRouter_EvictPeers(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() sub := peerManager.Subscribe(ctx) defer sub.Close() @@ -893,13 +881,12 @@ func TestRouter_ChannelCompatability(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( ctx, @@ -941,20 +928,19 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { Return(peer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF) mockTransport := &mocks.Transport{} mockTransport.On("AddChannelDescriptors", mock.Anything).Return() mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() sub := peerManager.Subscribe(ctx) defer sub.Close() diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 08de0d3b0..041bbda3a 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -39,7 +39,7 @@ type Transport interface { // Accept waits for the next inbound connection on a listening endpoint, blocking // until either a connection is available or the transport is closed. On closure, // io.EOF is returned and further Accept calls are futile. - Accept() (Connection, error) + Accept(context.Context) (Connection, error) // Dial creates an outbound connection to an endpoint. 
Dial(context.Context, Endpoint) (Connection, error) @@ -85,10 +85,10 @@ type Connection interface { // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. - ReceiveMessage() (ChannelID, []byte, error) + ReceiveMessage(context.Context) (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - SendMessage(ChannelID, []byte) error + SendMessage(context.Context, ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index b89671670..46227ff8f 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -44,10 +44,10 @@ type MConnTransport struct { options MConnTransportOptions mConnConfig conn.MConnConfig channelDescs []*ChannelDescriptor - closeCh chan struct{} - closeOnce sync.Once - listener net.Listener + closeOnce sync.Once + doneCh chan struct{} + listener net.Listener } // NewMConnTransport sets up a new MConnection transport. This uses the @@ -63,7 +63,7 @@ func NewMConnTransport( logger: logger, options: options, mConnConfig: mConnConfig, - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), channelDescs: channelDescs, } } @@ -84,10 +84,11 @@ func (m *MConnTransport) Endpoints() []Endpoint { return []Endpoint{} } select { - case <-m.closeCh: + case <-m.doneCh: return []Endpoint{} default: } + endpoint := Endpoint{ Protocol: MConnProtocol, } @@ -132,7 +133,7 @@ func (m *MConnTransport) Listen(endpoint Endpoint) error { } // Accept implements Transport. 
-func (m *MConnTransport) Accept() (Connection, error) { +func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { if m.listener == nil { return nil, errors.New("transport is not listening") } @@ -140,7 +141,9 @@ func (m *MConnTransport) Accept() (Connection, error) { tcpConn, err := m.listener.Accept() if err != nil { select { - case <-m.closeCh: + case <-ctx.Done(): + return nil, io.EOF + case <-m.doneCh: return nil, io.EOF default: return nil, err @@ -178,7 +181,7 @@ func (m *MConnTransport) Dial(ctx context.Context, endpoint Endpoint) (Connectio func (m *MConnTransport) Close() error { var err error m.closeOnce.Do(func() { - close(m.closeCh) // must be closed first, to handle error in Accept() + close(m.doneCh) if m.listener != nil { err = m.listener.Close() } @@ -222,7 +225,7 @@ type mConnConnection struct { channelDescs []*ChannelDescriptor receiveCh chan mConnMessage errorCh chan error - closeCh chan struct{} + doneCh chan struct{} closeOnce sync.Once mconn *conn.MConnection // set during Handshake() @@ -248,7 +251,7 @@ func newMConnConnection( channelDescs: channelDescs, receiveCh: make(chan mConnMessage), errorCh: make(chan error, 1), // buffered to avoid onError leak - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), } } @@ -370,16 +373,16 @@ func (c *mConnConnection) handshake( } // onReceive is a callback for MConnection received messages. -func (c *mConnConnection) onReceive(chID ChannelID, payload []byte) { +func (c *mConnConnection) onReceive(ctx context.Context, chID ChannelID, payload []byte) { select { case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: - case <-c.closeCh: + case <-ctx.Done(): } } // onError is a callback for MConnection errors. The error is passed via errorCh // to ReceiveMessage (but not SendMessage, for legacy P2P stack behavior). 
-func (c *mConnConnection) onError(e interface{}) { +func (c *mConnConnection) onError(ctx context.Context, e interface{}) { err, ok := e.(error) if !ok { err = fmt.Errorf("%v", err) @@ -389,7 +392,7 @@ func (c *mConnConnection) onError(e interface{}) { _ = c.Close() select { case c.errorCh <- err: - case <-c.closeCh: + case <-ctx.Done(): } } @@ -399,14 +402,14 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. -func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { +func (c *mConnConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: return err - case <-c.closeCh: + case <-ctx.Done(): return io.EOF default: if ok := c.mconn.Send(chID, msg); !ok { @@ -418,11 +421,13 @@ func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { } // ReceiveMessage implements Connection. 
-func (c *mConnConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *mConnConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { select { case err := <-c.errorCh: return 0, nil, err - case <-c.closeCh: + case <-c.doneCh: + return 0, nil, io.EOF + case <-ctx.Done(): return 0, nil, io.EOF case msg := <-c.receiveCh: return msg.channelID, msg.payload, nil @@ -462,7 +467,7 @@ func (c *mConnConnection) Close() error { } else { err = c.conn.Close() } - close(c.closeCh) + close(c.doneCh) }) return err } diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index 4d9a945cb..0851fe0e2 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -52,8 +52,10 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { t.Cleanup(func() { _ = transport.Close() }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, err := transport.Accept() + _, err := transport.Accept(ctx) require.Error(t, err) require.NotEqual(t, io.EOF, err) // io.EOF should be returned after Close() } @@ -85,7 +87,7 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { acceptCh := make(chan p2p.Connection, 10) go func() { for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) if err != nil { return } @@ -203,7 +205,7 @@ func TestMConnTransport_Listen(t *testing.T) { close(dialedChan) }() - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) require.NoError(t, err) _ = conn.Close() <-dialedChan @@ -212,7 +214,7 @@ func TestMConnTransport_Listen(t *testing.T) { require.NoError(t, peerConn.Close()) // try to read from the connection should error - _, _, err = peerConn.ReceiveMessage() + _, _, err = peerConn.ReceiveMessage(ctx) require.Error(t, err) // Trying to listen again should error. 
diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 5d9291675..27b9e77e1 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -94,9 +94,7 @@ type MemoryTransport struct { nodeID types.NodeID bufferSize int - acceptCh chan *MemoryConnection - closeCh chan struct{} - closeOnce sync.Once + acceptCh chan *MemoryConnection } // newMemoryTransport creates a new MemoryTransport. This is for internal use by @@ -108,7 +106,6 @@ func newMemoryTransport(network *MemoryNetwork, nodeID types.NodeID) *MemoryTran nodeID: nodeID, bufferSize: network.bufferSize, acceptCh: make(chan *MemoryConnection), - closeCh: make(chan struct{}), } } @@ -128,28 +125,27 @@ func (t *MemoryTransport) Protocols() []Protocol { // Endpoints implements Transport. func (t *MemoryTransport) Endpoints() []Endpoint { - select { - case <-t.closeCh: + if n := t.network.GetTransport(t.nodeID); n == nil { return []Endpoint{} - default: - return []Endpoint{{ - Protocol: MemoryProtocol, - Path: string(t.nodeID), - // An arbitrary IP and port is used in order for the pex - // reactor to be able to send addresses to one another. - IP: net.IPv4zero, - Port: 0, - }} } + + return []Endpoint{{ + Protocol: MemoryProtocol, + Path: string(t.nodeID), + // An arbitrary IP and port is used in order for the pex + // reactor to be able to send addresses to one another. + IP: net.IPv4zero, + Port: 0, + }} } // Accept implements Transport. 
-func (t *MemoryTransport) Accept() (Connection, error) { +func (t *MemoryTransport) Accept(ctx context.Context) (Connection, error) { select { case conn := <-t.acceptCh: t.logger.Info("accepted connection", "remote", conn.RemoteEndpoint().Path) return conn, nil - case <-t.closeCh: + case <-ctx.Done(): return nil, io.EOF } } @@ -187,20 +183,14 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti select { case peer.acceptCh <- inConn: return outConn, nil - case <-peer.closeCh: - return nil, io.EOF case <-ctx.Done(): - return nil, ctx.Err() + return nil, io.EOF } } // Close implements Transport. func (t *MemoryTransport) Close() error { t.network.RemoveTransport(t.nodeID) - t.closeOnce.Do(func() { - close(t.closeCh) - t.logger.Info("closed transport") - }) return nil } @@ -295,12 +285,14 @@ func (c *MemoryConnection) Handshake( } // ReceiveMessage implements Connection. -func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *MemoryConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { case <-c.closer.Done(): return 0, nil, io.EOF + case <-ctx.Done(): + return 0, nil, io.EOF default: } @@ -314,12 +306,14 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { +func (c *MemoryConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. 
select { case <-c.closer.Done(): return io.EOF + case <-ctx.Done(): + return io.EOF default: } @@ -327,6 +321,8 @@ func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: c.logger.Debug("sent message", "chID", chID, "msg", msg) return nil + case <-ctx.Done(): + return io.EOF case <-c.closer.Done(): return io.EOF } diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index a53be251d..63ce5ad5b 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -46,21 +46,23 @@ func TestTransport_AcceptClose(t *testing.T) { withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) + opctx, opcancel := context.WithCancel(ctx) // In-progress Accept should error on concurrent close. errCh := make(chan error, 1) go func() { time.Sleep(200 * time.Millisecond) + opcancel() errCh <- a.Close() }() - _, err := a.Accept() + _, err := a.Accept(opctx) require.Error(t, err) require.Equal(t, io.EOF, err) require.NoError(t, <-errCh) // Closed transport should return error immediately. - _, err = a.Accept() + _, err = a.Accept(opctx) require.Error(t, err) require.Equal(t, io.EOF, err) }) @@ -93,7 +95,7 @@ func TestTransport_DialEndpoints(t *testing.T) { // Spawn a goroutine to simply accept any connections until closed. go func() { for { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err != nil { return } @@ -177,7 +179,6 @@ func TestTransport_Dial(t *testing.T) { cancel() _, err := a.Dial(cancelCtx, bEndpoint) require.Error(t, err) - require.Equal(t, err, context.Canceled) // Unavailable endpoint should error. err = b.Close() @@ -188,7 +189,7 @@ func TestTransport_Dial(t *testing.T) { // Dialing from a closed transport should still work. 
errCh := make(chan error, 1) go func() { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err == nil { _ = conn.Close() } @@ -351,13 +352,12 @@ func TestConnection_FlushClose(t *testing.T) { err := ab.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) - require.Equal(t, io.EOF, err) }) } @@ -388,19 +388,19 @@ func TestConnection_SendReceive(t *testing.T) { ab, ba := dialAcceptHandshake(ctx, t, a, b) // Can send and receive a to b. - err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(ctx, chID, []byte("foo")) require.NoError(t, err) - ch, msg, err := ba.ReceiveMessage() + ch, msg, err := ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("foo"), msg) require.Equal(t, chID, ch) // Can send and receive b to a. - err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(ctx, chID, []byte("bar")) require.NoError(t, err) - _, msg, err = ab.ReceiveMessage() + _, msg, err = ab.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("bar"), msg) @@ -410,9 +410,9 @@ func TestConnection_SendReceive(t *testing.T) { err = b.Close() require.NoError(t, err) - err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(ctx, chID, []byte("still here")) require.NoError(t, err) - ch, msg, err = ba.ReceiveMessage() + ch, msg, err = ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, chID, ch) require.Equal(t, []byte("still here"), msg) @@ -422,21 +422,20 @@ func TestConnection_SendReceive(t *testing.T) { err = ba.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(ctx, chID, []byte("closed")) 
require.Error(t, err) require.Equal(t, io.EOF, err) - _, _, err = ba.ReceiveMessage() + _, _, err = ba.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ba.SendMessage(chID, []byte("closed")) + err = ba.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) - require.Equal(t, io.EOF, err) }) } @@ -606,7 +605,7 @@ func dialAccept(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Conn acceptCh := make(chan p2p.Connection, 1) errCh := make(chan error, 1) go func() { - conn, err := b.Accept() + conn, err := b.Accept(ctx) errCh <- err acceptCh <- conn }() From 0b3e00a6b574db95b5e0f7c93ad3516d3eea42ee Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 10:54:14 -0500 Subject: [PATCH 03/33] ci: skip docker image builds during PRs (#7397) --- .github/workflows/docker.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index dffc28689..8d0f5bb3d 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -2,7 +2,6 @@ name: Docker # Build & Push rebuilds the tendermint docker image on every push to master and creation of tags # and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags on: - pull_request: push: branches: - master @@ -39,7 +38,7 @@ jobs: with: platforms: all - - name: Set up Docker Buildx + - name: Set up Docker Build uses: docker/setup-buildx-action@v1.6.0 - name: Login to DockerHub From 0ff3d4b89dd316f049e3186bfa4a321e699a6ac0 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 11:40:59 -0500 Subject: [PATCH 04/33] service: cleanup close channel in reactors (#7399) --- internal/blocksync/reactor.go | 26 ++--------------- internal/consensus/reactor.go | 47 ++++-------------------------- internal/evidence/reactor.go | 25 ++-------------- internal/statesync/dispatcher.go | 16 ++-------- internal/statesync/reactor.go | 32 ++++---------------- 
internal/statesync/reactor_test.go | 1 - internal/statesync/syncer.go | 17 ++++------- internal/statesync/syncer_test.go | 4 +-- 8 files changed, 26 insertions(+), 142 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index f6ea48839..479506c71 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -86,7 +86,6 @@ type Reactor struct { // blockSyncCh.Out. blockSyncOutBridgeCh chan p2p.Envelope peerUpdates *p2p.PeerUpdates - closeCh chan struct{} requestsCh <-chan BlockRequest errorsCh <-chan peerError @@ -138,7 +137,6 @@ func NewReactor( blockSyncCh: blockSyncCh, blockSyncOutBridgeCh: make(chan p2p.Envelope), peerUpdates: peerUpdates, - closeCh: make(chan struct{}), metrics: metrics, syncStartTime: time.Time{}, } @@ -184,10 +182,6 @@ func (r *Reactor) OnStop() { // wait for the poolRoutine and requestRoutine goroutines to gracefully exit r.poolWG.Wait() - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). 
- close(r.closeCh) - <-r.peerUpdates.Done() } @@ -295,6 +289,7 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on block sync channel; closing...") return case envelope := <-r.blockSyncCh.In: if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { @@ -304,14 +299,8 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) { Err: err, } } - case envelope := <-r.blockSyncOutBridgeCh: r.blockSyncCh.Out <- envelope - - case <-r.closeCh: - r.logger.Debug("stopped listening on block sync channel; closing...") - return - } } } @@ -350,13 +339,10 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on peer updates channel; closing...") return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.logger.Debug("stopped listening on peer updates channel; closing...") - return } } } @@ -391,24 +377,18 @@ func (r *Reactor) requestRoutine(ctx context.Context) { for { select { - case <-r.closeCh: - return - case <-ctx.Done(): return - case request := <-r.requestsCh: r.blockSyncOutBridgeCh <- p2p.Envelope{ To: request.PeerID, Message: &bcproto.BlockRequest{Height: request.Height}, } - case pErr := <-r.errorsCh: r.blockSyncCh.Error <- p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, } - case <-statusUpdateTicker.C: r.poolWG.Add(1) @@ -598,8 +578,6 @@ FOR_LOOP: case <-ctx.Done(): break FOR_LOOP - case <-r.closeCh: - break FOR_LOOP case <-r.pool.exitedCh: break FOR_LOOP } diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index d0d625e26..387f84b15 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -126,13 +126,6 @@ type Reactor struct { voteCh *p2p.Channel voteSetBitsCh *p2p.Channel peerUpdates *p2p.PeerUpdates - - // NOTE: We need a dedicated stateCloseCh channel for signaling closure of - // the 
StateChannel due to the fact that the StateChannel message handler - // performs a send on the VoteSetBitsChannel. This is an antipattern, so having - // this dedicated channel,stateCloseCh, is necessary in order to avoid data races. - stateCloseCh chan struct{} - closeCh chan struct{} } // NewReactor returns a reference to a new consensus reactor, which implements @@ -162,8 +155,6 @@ func NewReactor( voteCh: voteCh, voteSetBitsCh: voteSetBitsCh, peerUpdates: peerUpdates, - stateCloseCh: make(chan struct{}), - closeCh: make(chan struct{}), } r.BaseService = *service.NewBaseService(logger, "Consensus", r) @@ -230,14 +221,6 @@ func (r *Reactor) OnStop() { } r.mtx.Unlock() - // Close the StateChannel goroutine separately since it uses its own channel - // to signal closure. - close(r.stateCloseCh) - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - <-r.peerUpdates.Done() } @@ -993,8 +976,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda case p2p.PeerStatusUp: // Do not allow starting new broadcasting goroutines after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer goroutines and closed r.closeCh, but the router still sends in-flight - // peer updates. + // peer goroutines, but the router still sends in-flight peer updates. 
if !r.IsRunning() { return } @@ -1337,6 +1319,7 @@ func (r *Reactor) processStateCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on StateChannel; closing...") return case envelope := <-r.stateCh.In: if err := r.handleMessage(r.stateCh.ID, envelope); err != nil { @@ -1346,10 +1329,6 @@ func (r *Reactor) processStateCh(ctx context.Context) { Err: err, } } - - case <-r.stateCloseCh: - r.logger.Debug("stopped listening on StateChannel; closing...") - return } } } @@ -1363,6 +1342,7 @@ func (r *Reactor) processDataCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on DataChannel; closing...") return case envelope := <-r.dataCh.In: if err := r.handleMessage(r.dataCh.ID, envelope); err != nil { @@ -1372,10 +1352,6 @@ func (r *Reactor) processDataCh(ctx context.Context) { Err: err, } } - - case <-r.closeCh: - r.logger.Debug("stopped listening on DataChannel; closing...") - return } } } @@ -1389,6 +1365,7 @@ func (r *Reactor) processVoteCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on VoteChannel; closing...") return case envelope := <-r.voteCh.In: if err := r.handleMessage(r.voteCh.ID, envelope); err != nil { @@ -1398,10 +1375,6 @@ func (r *Reactor) processVoteCh(ctx context.Context) { Err: err, } } - - case <-r.closeCh: - r.logger.Debug("stopped listening on VoteChannel; closing...") - return } } } @@ -1415,6 +1388,7 @@ func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on VoteSetBitsChannel; closing...") return case envelope := <-r.voteSetBitsCh.In: if err := r.handleMessage(r.voteSetBitsCh.ID, envelope); err != nil { @@ -1424,10 +1398,6 @@ func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { Err: err, } } - - case <-r.closeCh: - r.logger.Debug("stopped listening on VoteSetBitsChannel; closing...") - return } } } @@ -1441,13 +1411,10 @@ func 
(r *Reactor) processPeerUpdates(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on peer updates channel; closing...") return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(ctx, peerUpdate) - - case <-r.closeCh: - r.logger.Debug("stopped listening on peer updates channel; closing...") - return } } } @@ -1486,8 +1453,6 @@ func (r *Reactor) peerStatsRoutine(ctx context.Context) { } case <-ctx.Done(): return - case <-r.closeCh: - return } } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index b559f0a2c..908e7d5f6 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -50,7 +50,6 @@ type Reactor struct { evpool *Pool evidenceCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} peerWG sync.WaitGroup @@ -72,7 +71,6 @@ func NewReactor( evpool: evpool, evidenceCh: evidenceCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), } @@ -104,10 +102,6 @@ func (r *Reactor) OnStop() { // exit. r.peerWG.Wait() - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - // Wait for all p2p Channels to be closed before returning. This ensures we // can easily reason about synchronization of all p2p Channels and ensure no // panics will occur. 
@@ -188,6 +182,7 @@ func (r *Reactor) processEvidenceCh(ctx context.Context) { for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on evidence channel; closing...") return case envelope := <-r.evidenceCh.In: if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { @@ -197,10 +192,6 @@ func (r *Reactor) processEvidenceCh(ctx context.Context) { Err: err, } } - - case <-r.closeCh: - r.logger.Debug("stopped listening on evidence channel; closing...") - return } } } @@ -226,8 +217,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda case p2p.PeerStatusUp: // Do not allow starting new evidence broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast loops, but the router still sends in-flight peer updates. if !r.IsRunning() { return } @@ -268,8 +258,6 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(ctx, peerUpdate) case <-ctx.Done(): - return - case <-r.closeCh: r.logger.Debug("stopped listening on peer updates channel; closing...") return } @@ -323,11 +311,6 @@ func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } @@ -366,9 +349,7 @@ func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID // explicitly closed to signal we should exit. return - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
+ case <-ctx.Done(): return } } diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 844cb5e32..8620e6285 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -27,7 +27,6 @@ var ( type Dispatcher struct { // the channel with which to send light block requests on requestCh chan<- p2p.Envelope - closeCh chan struct{} mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer @@ -37,7 +36,6 @@ type Dispatcher struct { func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { return &Dispatcher{ requestCh: requestCh, - closeCh: make(chan struct{}), calls: make(map[types.NodeID]chan *types.LightBlock), } } @@ -47,7 +45,7 @@ func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { // LightBlock response is used to signal that the peer doesn't have the requested LightBlock. func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { // dispatch the request to the peer - callCh, err := d.dispatch(peer, height) + callCh, err := d.dispatch(ctx, peer, height) if err != nil { return nil, err } @@ -69,19 +67,16 @@ func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.No case <-ctx.Done(): return nil, ctx.Err() - - case <-d.closeCh: - return nil, errDisconnected } } // dispatch takes a peer and allocates it a channel so long as it's not already // busy and the receiving channel is still running. 
It then dispatches the message -func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { +func (d *Dispatcher) dispatch(ctx context.Context, peer types.NodeID, height int64) (chan *types.LightBlock, error) { d.mtx.Lock() defer d.mtx.Unlock() select { - case <-d.closeCh: + case <-ctx.Done(): return nil, errDisconnected default: } @@ -141,17 +136,12 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { func (d *Dispatcher) Close() { d.mtx.Lock() defer d.mtx.Unlock() - close(d.closeCh) for peer, call := range d.calls { delete(d.calls, peer) close(call) } } -func (d *Dispatcher) Done() <-chan struct{} { - return d.closeCh -} - //---------------------------------------------------------------- // BlockProvider is a p2p based light provider which uses a dispatcher connected diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index cd3dde3ea..f6eac2a97 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -142,7 +142,6 @@ type Reactor struct { blockCh *p2p.Channel paramsCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // Dispatcher is used to multiplex light block requests and responses over multiple // peers used by the p2p state provider and in reverse sync. @@ -192,7 +191,6 @@ func NewReactor( blockCh: blockCh, paramsCh: paramsCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), tempDir: tempDir, stateStore: stateStore, blockStore: blockStore, @@ -227,12 +225,6 @@ func (r *Reactor) OnStart(ctx context.Context) error { func (r *Reactor) OnStop() { // tell the dispatcher to stop sending any more requests r.dispatcher.Close() - // wait for any remaining requests to complete - <-r.dispatcher.Done() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). 
- close(r.closeCh) <-r.peerUpdates.Done() } @@ -268,7 +260,6 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.stateProvider, r.snapshotCh.Out, r.chunkCh.Out, - ctx.Done(), r.tempDir, r.metrics, ) @@ -290,7 +281,6 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { select { case <-ctx.Done(): - case <-r.closeCh: case r.snapshotCh.Out <- msg: } } @@ -446,9 +436,6 @@ func (r *Reactor) backfill( // verify all light blocks for { select { - case <-r.closeCh: - queue.close() - return nil case <-ctx.Done(): queue.close() return nil @@ -816,6 +803,7 @@ func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) for { select { case <-ctx.Done(): + r.logger.Debug("channel closed", "channel", chName) return case envelope := <-ch.In: if err := r.handleMessage(ch.ID, envelope); err != nil { @@ -829,17 +817,13 @@ func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) Err: err, } } - - case <-r.closeCh: - r.logger.Debug("channel closed", "channel", chName) - return } } } // processPeerUpdate processes a PeerUpdate, returning an error upon failing to // handle the PeerUpdate or if a panic is recovered. 
-func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { r.logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) switch peerUpdate.Status { @@ -859,7 +843,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) r.providers[peerUpdate.NodeID] = newProvider - err := r.syncer.AddPeer(peerUpdate.NodeID) + err := r.syncer.AddPeer(ctx, peerUpdate.NodeID) if err != nil { r.logger.Error("error adding peer to syncer", "error", err) return @@ -886,13 +870,10 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { case <-ctx.Done(): - return - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: r.logger.Debug("stopped listening on peer updates channel; closing...") return + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate) } } } @@ -981,9 +962,6 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { case <-ctx.Done(): return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]", time.Since(startAt).Seconds(), r.peers.Len(), numPeers) - case <-r.closeCh: - return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]", - time.Since(startAt).Seconds(), r.peers.Len(), numPeers) case <-t.C: continue case <-logT.C: diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 82ec0f68d..b1863f17b 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -172,7 +172,6 @@ func setup( stateProvider, rts.snapshotOutCh, rts.chunkOutCh, - ctx.Done(), "", rts.reactor.metrics, ) diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index f266017dd..a0f79494a 100644 --- a/internal/statesync/syncer.go 
+++ b/internal/statesync/syncer.go @@ -70,7 +70,6 @@ type syncer struct { avgChunkTime int64 lastSyncedSnapshotHeight int64 processingSnapshot *snapshot - closeCh <-chan struct{} } // newSyncer creates a new syncer. @@ -82,7 +81,6 @@ func newSyncer( stateProvider StateProvider, snapshotCh chan<- p2p.Envelope, chunkCh chan<- p2p.Envelope, - closeCh <-chan struct{}, tempDir string, metrics *Metrics, ) *syncer { @@ -98,7 +96,6 @@ func newSyncer( fetchers: cfg.Fetchers, retryTimeout: cfg.ChunkRequestTimeout, metrics: metrics, - closeCh: closeCh, } } @@ -141,7 +138,7 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peerID types.NodeID) (err error) { +func (s *syncer) AddPeer(ctx context.Context, peerID types.NodeID) (err error) { defer func() { // TODO: remove panic recover once AddPeer can no longer accientally send on // closed channel. 
@@ -160,7 +157,7 @@ func (s *syncer) AddPeer(peerID types.NodeID) (err error) { } select { - case <-s.closeCh: + case <-ctx.Done(): case s.snapshotCh <- msg: } return err @@ -494,8 +491,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return - case <-s.closeCh: - return case <-time.After(2 * time.Second): continue } @@ -511,7 +506,7 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch ticker := time.NewTicker(s.retryTimeout) defer ticker.Stop() - s.requestChunk(snapshot, index) + s.requestChunk(ctx, snapshot, index) select { case <-chunks.WaitFor(index): @@ -522,8 +517,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return - case <-s.closeCh: - return } ticker.Stop() @@ -531,7 +524,7 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch } // requestChunk requests a chunk from a peer. -func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { +func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) { peer := s.snapshots.GetPeer(snapshot) if peer == "" { s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, @@ -558,7 +551,7 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { select { case s.chunkCh <- msg: - case <-s.closeCh: + case <-ctx.Done(): } } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 4c240830f..816e6301a 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -78,13 +78,13 @@ func TestSyncer_SyncAny(t *testing.T) { require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery messages - err = rts.syncer.AddPeer(peerAID) + err = rts.syncer.AddPeer(ctx, peerAID) require.NoError(t, err) e := <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerAID, e.To) - err 
= rts.syncer.AddPeer(peerBID) + err = rts.syncer.AddPeer(ctx, peerBID) require.NoError(t, err) e = <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) From b057740bd3dc92689b83901eb7cbd33758661f38 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 11:47:22 -0500 Subject: [PATCH 05/33] ci: tweak e2e configuration (#7400) --- test/e2e/networks/ci.toml | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index f73a18859..b3fa7891f 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -16,15 +16,14 @@ validator03 = 30 validator04 = 40 [validator_update.1010] -validator05 = 50 +validator04 = 50 # validator03 gets killed and validator05 has lots of perturbations, so weight them low. [validator_update.1020] validator01 = 100 validator02 = 100 validator03 = 50 -validator04 = 100 -validator05 = 50 +validator04 = 50 [node.seed01] mode = "seed" @@ -38,7 +37,7 @@ block_sync = "v0" [node.validator02] abci_protocol = "tcp" -database = "boltdb" +database = "rocksdb" persist_interval = 0 perturb = ["restart"] privval_protocol = "tcp" @@ -56,28 +55,19 @@ block_sync = "v0" retain_blocks = 10 [node.validator04] -abci_protocol = "builtin" -snapshot_interval = 5 -database = "rocksdb" -persistent_peers = ["validator01"] -perturb = ["pause"] -block_sync = "v0" - -[node.validator05] database = "cleveldb" block_sync = "v0" state_sync = "p2p" seeds = ["seed01"] start_at = 1005 # Becomes part of the validator set at 1010 abci_protocol = "grpc" -perturb = ["pause", "disconnect", "restart"] +perturb = ["pause"] privval_protocol = "tcp" [node.full01] mode = "full" start_at = 1010 block_sync = "v0" -persistent_peers = ["validator01", "validator02", "validator03", "validator04"] perturb = ["restart"] retain_blocks = 10 state_sync = "rpc" From 587c91132b1c18fd425a8b224bfaa3426992b79f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 
14:53:22 -0500 Subject: [PATCH 06/33] build: declare packages variable in correct makefile (#7402) --- test/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/Makefile b/test/Makefile index 86226cf03..d141bb684 100644 --- a/test/Makefile +++ b/test/Makefile @@ -3,6 +3,8 @@ ######################################## ### Testing +PACKAGES=$(shell go list ./...) + BINDIR ?= $(GOPATH)/bin ## required to be run first by most tests From 26d421b8f6c3e9b2ba98c32b1f9cac3a06d3ee7b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 7 Dec 2021 16:17:55 -0500 Subject: [PATCH 07/33] Revert "ci: tweak e2e configuration (#7400)" (#7404) This reverts commit b057740bd3dc92689b83901eb7cbd33758661f38. --- test/e2e/networks/ci.toml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index b3fa7891f..f73a18859 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -16,14 +16,15 @@ validator03 = 30 validator04 = 40 [validator_update.1010] -validator04 = 50 +validator05 = 50 # validator03 gets killed and validator05 has lots of perturbations, so weight them low. 
[validator_update.1020] validator01 = 100 validator02 = 100 validator03 = 50 -validator04 = 50 +validator04 = 100 +validator05 = 50 [node.seed01] mode = "seed" @@ -37,7 +38,7 @@ block_sync = "v0" [node.validator02] abci_protocol = "tcp" -database = "rocksdb" +database = "boltdb" persist_interval = 0 perturb = ["restart"] privval_protocol = "tcp" @@ -55,19 +56,28 @@ block_sync = "v0" retain_blocks = 10 [node.validator04] +abci_protocol = "builtin" +snapshot_interval = 5 +database = "rocksdb" +persistent_peers = ["validator01"] +perturb = ["pause"] +block_sync = "v0" + +[node.validator05] database = "cleveldb" block_sync = "v0" state_sync = "p2p" seeds = ["seed01"] start_at = 1005 # Becomes part of the validator set at 1010 abci_protocol = "grpc" -perturb = ["pause"] +perturb = ["pause", "disconnect", "restart"] privval_protocol = "tcp" [node.full01] mode = "full" start_at = 1010 block_sync = "v0" +persistent_peers = ["validator01", "validator02", "validator03", "validator04"] perturb = ["restart"] retain_blocks = 10 state_sync = "rpc" From 5ba3c6be42ee694c5eb0278dfc2c9a8828996342 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 8 Dec 2021 11:21:58 +0100 Subject: [PATCH 08/33] cmd: cosmetic changes for errors and print statements (#7377) * cmd: cosmetic changes for errors and print statements * fix load block store test * lint --- cmd/tendermint/commands/reindex_event.go | 10 +++++++++ cmd/tendermint/commands/reindex_event_test.go | 22 +++++++++++++++++-- cmd/tendermint/commands/rollback.go | 2 +- 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 58f11657b..bd9577963 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -3,6 +3,7 @@ package commands import ( "errors" "fmt" + "path/filepath" "strings" "github.com/spf13/cobra" @@ -16,6 +17,7 @@ import ( 
"github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -132,6 +134,10 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { dbType := dbm.BackendType(cfg.DBBackend) + if !os.FileExists(filepath.Join(cfg.DBDir(), "blockstore.db")) { + return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir()) + } + // Get BlockStore blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { @@ -139,6 +145,10 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, } blockStore := store.NewBlockStore(blockStoreDB) + if !os.FileExists(filepath.Join(cfg.DBDir(), "state.db")) { + return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir()) + } + // Get StateStore stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index c76ce6d9a..2008251bc 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -15,6 +15,7 @@ import ( "github.com/tendermint/tendermint/internal/state/mocks" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tm-db" _ "github.com/lib/pq" // for the psql sink ) @@ -109,12 +110,29 @@ func TestLoadEventSink(t *testing.T) { } func TestLoadBlockStore(t *testing.T) { - bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig()) + testCfg, err := tmcfg.ResetTestRoot(t.Name()) + require.NoError(t, err) + testCfg.DBBackend = "goleveldb" + _, _, err = 
loadStateAndBlockStore(testCfg) + // we should return an error because the state store and block store + // don't yet exist + require.Error(t, err) + + dbType := dbm.BackendType(testCfg.DBBackend) + bsdb, err := dbm.NewDB("blockstore", dbType, testCfg.DBDir()) + require.NoError(t, err) + bsdb.Close() + + ssdb, err := dbm.NewDB("state", dbType, testCfg.DBDir()) + require.NoError(t, err) + ssdb.Close() + + bs, ss, err := loadStateAndBlockStore(testCfg) require.NoError(t, err) require.NotNil(t, bs) require.NotNil(t, ss) - } + func TestReIndexEvent(t *testing.T) { mockBlockStore := &mocks.BlockStore{} mockStateStore := &mocks.Store{} diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index c19d35cce..8391ee506 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -26,7 +26,7 @@ application. return fmt.Errorf("failed to rollback state: %w", err) } - fmt.Printf("Rolled back state to height %d and hash %v", height, hash) + fmt.Printf("Rolled back state to height %d and hash %X", height, hash) return nil }, } From 892f5d952481b956d6c317557a60fa7f48f31786 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 8 Dec 2021 08:44:32 -0500 Subject: [PATCH 09/33] service: cleanup mempool and peer update shutdown (#7401) --- internal/blocksync/reactor.go | 4 -- internal/blocksync/reactor_test.go | 4 +- internal/consensus/reactor.go | 4 -- internal/evidence/reactor.go | 7 -- internal/evidence/reactor_test.go | 4 +- internal/mempool/reactor.go | 47 +++--------- internal/mempool/reactor_test.go | 16 +++-- internal/p2p/p2ptest/network.go | 17 ++--- internal/p2p/p2ptest/require.go | 6 -- internal/p2p/peermanager.go | 60 ++++++---------- internal/p2p/peermanager_scoring_test.go | 6 +- internal/p2p/peermanager_test.go | 91 ++++++++++++++---------- internal/p2p/pex/reactor.go | 1 - internal/p2p/pex/reactor_test.go | 14 +--- internal/p2p/router.go | 4 +- internal/p2p/router_test.go | 7 -- 
internal/statesync/reactor.go | 4 -- 17 files changed, 110 insertions(+), 186 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 479506c71..6a5620f4e 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -181,8 +181,6 @@ func (r *Reactor) OnStop() { // wait for the poolRoutine and requestRoutine goroutines to gracefully exit r.poolWG.Wait() - - <-r.peerUpdates.Done() } // respondToPeer loads a block and sends it to the requesting peer, if we have it. @@ -334,8 +332,6 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() - for { select { case <-ctx.Done(): diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 5345cb5c4..d9d959ddc 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -82,8 +82,6 @@ func setup( t.Cleanup(func() { cancel() for _, nodeID := range rts.nodes { - rts.peerUpdates[nodeID].Close() - if rts.reactors[nodeID].IsRunning() { rts.reactors[nodeID].Wait() rts.app[nodeID].Wait() @@ -228,7 +226,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) { Status: p2p.PeerStatusDown, NodeID: rts.nodes[0], } - rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0]) + rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(ctx, rts.nodes[0]) } func TestReactor_SyncTime(t *testing.T) { diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 387f84b15..8f5cdd0b1 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -220,8 +220,6 @@ func (r *Reactor) OnStop() { state.broadcastWG.Wait() } r.mtx.Unlock() - - <-r.peerUpdates.Done() } // SetEventBus sets the reactor's event bus. 
@@ -1406,8 +1404,6 @@ func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() - for { select { case <-ctx.Done(): diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 908e7d5f6..31e927ba7 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -102,11 +102,6 @@ func (r *Reactor) OnStop() { // exit. r.peerWG.Wait() - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.peerUpdates.Done() - // Close the evidence db r.evpool.Close() } @@ -251,8 +246,6 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() - for { select { case peerUpdate := <-r.peerUpdates.Updates(): diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index b30f9e9b1..df636ba66 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -257,11 +257,11 @@ func TestReactorMultiDisconnect(t *testing.T) { // Ensure "disconnecting" the secondary peer from the primary more than once // is handled gracefully. 
- primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) _, err := primary.PeerManager.TryEvictNext() require.NoError(t, err) - primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) require.Equal(t, secondary.PeerManager.Status(primary.NodeID), p2p.PeerStatusUp) diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 00345ccf8..43215f5f8 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -48,7 +48,6 @@ type Reactor struct { mempoolCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // peerWG is used to coordinate graceful termination of all peer broadcasting // goroutines. @@ -80,7 +79,6 @@ func NewReactor( ids: NewMempoolIDs(), mempoolCh: mempoolCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), observePanic: defaultObservePanic, } @@ -136,19 +134,13 @@ func (r *Reactor) OnStop() { // wait for all spawned peer tx broadcasting goroutines to gracefully exit r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - <-r.peerUpdates.Done() } // handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. // For every tx in the message, we execute CheckTx. It returns an error if an // empty set of txs are sent in an envelope or if we receive an unexpected // message type. 
-func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -164,7 +156,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { } for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { + if err := r.mempool.CheckTx(ctx, types.Tx(tx), nil, txInfo); err != nil { logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -179,7 +171,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { r.observePanic(e) @@ -196,7 +188,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err switch chID { case MempoolChannel: - err = r.handleMempoolMessage(envelope) + err = r.handleMempoolMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%T)", chID, envelope.Message) @@ -211,7 +203,7 @@ func (r *Reactor) processMempoolCh(ctx context.Context) { for { select { case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.mempoolCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) r.mempoolCh.Error <- p2p.PeerError{ NodeID: envelope.From, @@ -219,8 +211,6 @@ 
func (r *Reactor) processMempoolCh(ctx context.Context) { } } case <-ctx.Done(): - return - case <-r.closeCh: r.logger.Debug("stopped listening on mempool channel; closing...") return } @@ -242,8 +232,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda case p2p.PeerStatusUp: // Do not allow starting new tx broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast, but the router still sends in-flight peer updates. if !r.IsRunning() { return } @@ -285,18 +274,13 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() - for { select { case <-ctx.Done(): + r.logger.Debug("stopped listening on peer updates channel; closing...") return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(ctx, peerUpdate) - - case <-r.closeCh: - r.logger.Debug("stopped listening on peer updates channel; closing...") - return } } } @@ -333,6 +317,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, c // start from the beginning. if nextGossipTx == nil { select { + case <-ctx.Done(): + return case <-r.mempool.WaitForNextTx(): // wait until a tx is available if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { continue @@ -342,14 +328,6 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, c // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. 
return - - case <-ctx.Done(): - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } @@ -388,19 +366,12 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, c select { case <-nextGossipTx.NextWaitChan(): nextGossipTx = nextGossipTx.Next() - case <-closer.Done(): // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. return - case <-ctx.Done(): return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 86a3b4db4..096544910 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -64,7 +64,7 @@ func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) mempool := setup(ctx, t, 0) rts.mempools[nodeID] = mempool - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) @@ -102,6 +102,7 @@ func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() rts.network.Start(ctx, t) + require.Len(t, rts.network.RandomNode().PeerManager.Peers(), len(rts.nodes)-1, @@ -126,13 +127,17 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...ty if !p2ptest.NodeInSlice(name, ids) { continue } + if len(txs) == pool.Size() { + continue + } wg.Add(1) go func(pool *TxMempool) { defer wg.Done() require.Eventually(t, func() bool { return len(txs) == pool.Size() }, time.Minute, - 100*time.Millisecond, + 250*time.Millisecond, + "ntx=%d, size=%d", 
len(txs), pool.Size(), ) }(pool) } @@ -191,14 +196,15 @@ func TestReactorBroadcastTxs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setupReactors(ctx, t, numNodes, 0) + rts := setupReactors(ctx, t, numNodes, uint(numTxs)) primary := rts.nodes[0] secondaries := rts.nodes[1:] txs := checkTxs(ctx, t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) - // run the router + require.Equal(t, numTxs, rts.reactors[primary].mempool.Size()) + rts.start(ctx, t) // Wait till all secondary suites (reactor) received all mempool txs from the @@ -407,7 +413,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rts := setupReactors(ctx, t, 2, 0) + rts := setupReactors(ctx, t, 2, 2) primary := rts.nodes[0] secondary := rts.nodes[1] diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 30f1a435f..bde96ba66 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -76,10 +76,11 @@ func (n *Network) Start(ctx context.Context, t *testing.T) { // for each node. 
dialQueue := []p2p.NodeAddress{} subs := map[types.NodeID]*p2p.PeerUpdates{} + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, node := range n.Nodes { dialQueue = append(dialQueue, node.NodeAddress) - subs[node.NodeID] = node.PeerManager.Subscribe(ctx) - defer subs[node.NodeID].Close() + subs[node.NodeID] = node.PeerManager.Subscribe(subctx) } // For each node, dial the nodes that it still doesn't have a connection to @@ -197,9 +198,10 @@ func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) { delete(n.Nodes, id) subs := []*p2p.PeerUpdates{} + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, peer := range n.Nodes { - sub := peer.PeerManager.Subscribe(ctx) - defer sub.Close() + sub := peer.PeerManager.Subscribe(subctx) subs = append(subs, sub) } @@ -329,7 +331,6 @@ func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdat sub := n.PeerManager.Subscribe(ctx) t.Cleanup(func() { RequireNoUpdates(ctx, t, sub) - sub.Close() }) return sub @@ -339,11 +340,7 @@ func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdat // It does *not* check that all updates have been consumed, but will // close the update channel. 
func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, t *testing.T) *p2p.PeerUpdates { - sub := n.PeerManager.Subscribe(ctx) - - t.Cleanup(sub.Close) - - return sub + return n.PeerManager.Subscribe(ctx) } func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 3a7731829..ce44a724c 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -119,9 +119,6 @@ func RequireUpdate(t *testing.T, peerUpdates *p2p.PeerUpdates, expect p2p.PeerUp case update := <-peerUpdates.Updates(): require.Equal(t, expect, update, "peer update did not match") - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") - case <-timer.C: require.Fail(t, "timed out waiting for peer update", "expected %v", expect) } @@ -143,9 +140,6 @@ func RequireUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates, expect []p2p.Pee return } - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") - case <-timer.C: require.Equal(t, expect, actual, "did not receive expected peer updates") return diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 40dcf8464..f7e4fb730 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -56,8 +56,6 @@ type PeerUpdate struct { type PeerUpdates struct { routerUpdatesCh chan PeerUpdate reactorUpdatesCh chan PeerUpdate - closeOnce sync.Once - doneCh chan struct{} } // NewPeerUpdates creates a new PeerUpdates subscription. It is primarily for @@ -67,7 +65,6 @@ func NewPeerUpdates(updatesCh chan PeerUpdate, buf int) *PeerUpdates { return &PeerUpdates{ reactorUpdatesCh: updatesCh, routerUpdatesCh: make(chan PeerUpdate, buf), - doneCh: make(chan struct{}), } } @@ -76,21 +73,6 @@ func (pu *PeerUpdates) Updates() <-chan PeerUpdate { return pu.reactorUpdatesCh } -// Done returns a channel that is closed when the subscription is closed. 
-func (pu *PeerUpdates) Done() <-chan struct{} { - return pu.doneCh -} - -// Close closes the peer updates subscription. -func (pu *PeerUpdates) Close() { - pu.closeOnce.Do(func() { - // NOTE: We don't close updatesCh since multiple goroutines may be - // sending on it. The PeerManager senders will select on doneCh as well - // to avoid blocking on a closed subscription. - close(pu.doneCh) - }) -} - // SendUpdate pushes information about a peer into the routing layer, // presumably from a peer. func (pu *PeerUpdates) SendUpdate(ctx context.Context, update PeerUpdate) { @@ -692,13 +674,13 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { // peer must already be marked as connected. This is separate from Dialed() and // Accepted() to allow the router to set up its internal queues before reactors // start sending messages. -func (m *PeerManager) Ready(peerID types.NodeID) { +func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() if m.connected[peerID] { m.ready[peerID] = true - m.broadcast(PeerUpdate{ + m.broadcast(ctx, PeerUpdate{ NodeID: peerID, Status: PeerStatusUp, }) @@ -759,7 +741,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // Disconnected unmarks a peer as connected, allowing it to be dialed or // accepted again as appropriate. -func (m *PeerManager) Disconnected(peerID types.NodeID) { +func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() @@ -772,7 +754,7 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) { delete(m.ready, peerID) if ready { - m.broadcast(PeerUpdate{ + m.broadcast(ctx, PeerUpdate{ NodeID: peerID, Status: PeerStatusDown, }) @@ -854,8 +836,8 @@ func (m *PeerManager) Subscribe(ctx context.Context) *PeerUpdates { // otherwise the PeerManager will halt. 
func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { m.mtx.Lock() + defer m.mtx.Unlock() m.subscriptions[peerUpdates] = peerUpdates - m.mtx.Unlock() go func() { for { @@ -863,26 +845,27 @@ func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { case <-ctx.Done(): return case pu := <-peerUpdates.routerUpdatesCh: - m.processPeerEvent(pu) + m.processPeerEvent(ctx, pu) } } }() go func() { - select { - case <-peerUpdates.Done(): - m.mtx.Lock() - delete(m.subscriptions, peerUpdates) - m.mtx.Unlock() - case <-ctx.Done(): - } + <-ctx.Done() + m.mtx.Lock() + defer m.mtx.Unlock() + delete(m.subscriptions, peerUpdates) }() } -func (m *PeerManager) processPeerEvent(pu PeerUpdate) { +func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) { m.mtx.Lock() defer m.mtx.Unlock() + if ctx.Err() != nil { + return + } + if _, ok := m.store.peers[pu.NodeID]; !ok { m.store.peers[pu.NodeID] = &peerInfo{} } @@ -902,18 +885,15 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) { // // FIXME: Consider using an internal channel to buffer updates while also // maintaining order if this is a problem. -func (m *PeerManager) broadcast(peerUpdate PeerUpdate) { +func (m *PeerManager) broadcast(ctx context.Context, peerUpdate PeerUpdate) { for _, sub := range m.subscriptions { - // We have to check doneChan separately first, otherwise there's a 50% - // chance the second select will send on a closed subscription. 
- select { - case <-sub.doneCh: - continue - default: + if ctx.Err() != nil { + return } select { + case <-ctx.Done(): + return case sub.reactorUpdatesCh <- peerUpdate: - case <-sub.doneCh: } } } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index ecaf71c98..4c7bef0cc 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -38,7 +38,7 @@ func TestPeerScoring(t *testing.T) { // add a bunch of good status updates and watch things increase. for i := 1; i < 10; i++ { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -47,7 +47,7 @@ func TestPeerScoring(t *testing.T) { // watch the corresponding decreases respond to update for i := 10; i == 0; i-- { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, }) @@ -57,7 +57,6 @@ func TestPeerScoring(t *testing.T) { t.Run("AsynchronousIncrement", func(t *testing.T) { start := peerManager.Scores()[id] pu := peerManager.Subscribe(ctx) - defer pu.Close() pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, @@ -71,7 +70,6 @@ func TestPeerScoring(t *testing.T) { t.Run("AsynchronousDecrement", func(t *testing.T) { start := peerManager.Scores()[id] pu := peerManager.Subscribe(ctx) - defer pu.Close() pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index dec92dab0..2999e8d6d 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -461,9 +461,11 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { require.NoError(t, err) require.Zero(t, dial) + dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond) + defer dcancel() go func() { time.Sleep(200 * time.Millisecond) - peerManager.Disconnected(a.NodeID) + 
peerManager.Disconnected(dctx, a.NodeID) }() ctx, cancel = context.WithTimeout(ctx, 3*time.Second) @@ -510,6 +512,9 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { } func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -575,7 +580,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // Now, if we disconnect a, we should be allowed to dial d because we have a // free upgrade slot. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, d, dial) @@ -584,7 +589,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // However, if we disconnect b (such that only c and d are connected), we // should not be allowed to dial e even though there are upgrade slots, // because there are no lower-scored nodes that can be upgraded. 
- peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) added, err = peerManager.Add(e) require.NoError(t, err) require.True(t, added) @@ -966,6 +971,9 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { } func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1005,7 +1013,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { // In the meanwhile, a disconnects and d connects. d is even lower-scored // than b (1 vs 2), which is currently being upgraded. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) @@ -1020,6 +1028,9 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { } func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1055,7 +1066,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { require.Equal(t, c, dial) // In the meanwhile, b disconnects. - peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) // Once c completes the upgrade of b, there is no longer a need to // evict anything since we're at capacity. 
@@ -1188,6 +1199,9 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { } func TestPeerManager_Accepted_Upgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1224,7 +1238,7 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { evict, err := peerManager.TryEvictNext() require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) // c still cannot get accepted, since it's not scored above b. require.Error(t, peerManager.Accepted(c.NodeID)) @@ -1288,7 +1302,6 @@ func TestPeerManager_Ready(t *testing.T) { require.NoError(t, err) sub := peerManager.Subscribe(ctx) - defer sub.Close() // Connecting to a should still have it as status down. added, err := peerManager.Add(a) @@ -1298,7 +1311,7 @@ func TestPeerManager_Ready(t *testing.T) { require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) // Marking a as ready should transition it to PeerStatusUp and send an update. 
- peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.Equal(t, p2p.PeerUpdate{ NodeID: a.NodeID, @@ -1310,7 +1323,7 @@ func TestPeerManager_Ready(t *testing.T) { require.NoError(t, err) require.True(t, added) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) - peerManager.Ready(b.NodeID) + peerManager.Ready(ctx, b.NodeID) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) require.Empty(t, sub.Updates()) } @@ -1329,7 +1342,7 @@ func TestPeerManager_EvictNext(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Since there are no peers to evict, EvictNext should block until timeout. timeoutCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) @@ -1365,7 +1378,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to error a peer after a delay. go func() { @@ -1400,7 +1413,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to upgrade to b with a delay. go func() { @@ -1441,7 +1454,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to upgrade b with a delay. 
go func() { @@ -1457,6 +1470,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { require.Equal(t, a.NodeID, evict) } func TestPeerManager_TryEvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1473,7 +1489,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) { // Connecting to a won't evict anything either. require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // But if a errors it should be evicted. peerManager.Errored(a.NodeID, errors.New("foo")) @@ -1502,10 +1518,9 @@ func TestPeerManager_Disconnected(t *testing.T) { defer cancel() sub := peerManager.Subscribe(ctx) - defer sub.Close() // Disconnecting an unknown peer does nothing. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, peerManager.Peers()) require.Empty(t, sub.Updates()) @@ -1514,14 +1529,14 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) // Disconnecting a ready peer sends a status update. 
_, err = peerManager.Add(a) require.NoError(t, err) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1529,7 +1544,7 @@ func TestPeerManager_Disconnected(t *testing.T) { Status: p2p.PeerStatusUp, }, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1543,13 +1558,16 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.Equal(t, a, dial) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Zero(t, dial) } func TestPeerManager_Errored(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1573,7 +1591,7 @@ func TestPeerManager_Errored(t *testing.T) { require.Zero(t, evict) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) evict, err = peerManager.TryEvictNext() require.NoError(t, err) require.Zero(t, evict) @@ -1596,7 +1614,6 @@ func TestPeerManager_Subscribe(t *testing.T) { // This tests all subscription events for full peer lifecycles. 
sub := peerManager.Subscribe(ctx) - defer sub.Close() added, err := peerManager.Add(a) require.NoError(t, err) @@ -1607,11 +1624,11 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1624,7 +1641,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Dialed(a)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) @@ -1635,7 +1652,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1659,7 +1676,6 @@ func TestPeerManager_Subscribe_Close(t *testing.T) { require.NoError(t, err) sub := peerManager.Subscribe(ctx) - defer sub.Close() added, err := peerManager.Add(a) require.NoError(t, err) @@ -1667,13 +1683,13 @@ func TestPeerManager_Subscribe_Close(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) // Closing the subscription should not send us the disconnected update. 
- sub.Close() - peerManager.Disconnected(a.NodeID) + cancel() + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) } @@ -1688,19 +1704,19 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) + s2ctx, s2cancel := context.WithCancel(ctx) + defer s2cancel() + s1 := peerManager.Subscribe(ctx) - defer s1.Close() - s2 := peerManager.Subscribe(ctx) - defer s2.Close() + s2 := peerManager.Subscribe(s2ctx) s3 := peerManager.Subscribe(ctx) - defer s3.Close() // Connecting to a peer should send updates on all subscriptions. added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) expectUp := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp} require.NotEmpty(t, s1) @@ -1712,8 +1728,9 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { // We now close s2. Disconnecting the peer should only send updates // on s1 and s3. - s2.Close() - peerManager.Disconnected(a.NodeID) + s2cancel() + time.Sleep(250 * time.Millisecond) // give the thread a chance to exit + peerManager.Disconnected(ctx, a.NodeID) expectDown := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown} require.NotEmpty(t, s1) diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index b42bb2f4b..6970c6fef 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -185,7 +185,6 @@ func (r *Reactor) processPexCh(ctx context.Context) { // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() for { select { case <-ctx.Done(): diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 5a061d76d..28da5c72c 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -296,10 +296,7 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates) require.NoError(t, reactor.Start(ctx)) - t.Cleanup(func() { - peerUpdates.Close() - reactor.Wait() - }) + t.Cleanup(reactor.Wait) return &singleTestReactor{ reactor: reactor, @@ -396,15 +393,11 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT require.Len(t, rts.reactors, realNodes) t.Cleanup(func() { - for nodeID, reactor := range rts.reactors { + for _, reactor := range rts.reactors { if reactor.IsRunning() { reactor.Wait() require.False(t, reactor.IsRunning()) } - rts.peerUpdates[nodeID].Close() - } - for _, nodeID := range rts.mocks { - rts.peerUpdates[nodeID].Close() } }) @@ -542,7 +535,6 @@ func (r *reactorTestSuite) listenForPeerUpdate( ) { on, with := r.checkNodePair(t, onNode, withNode) sub := r.network.Nodes[on].PeerManager.Subscribe(ctx) - defer sub.Close() timesUp := time.After(waitPeriod) for { select { @@ -649,9 +641,7 @@ func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourc } sourceSub := n1.PeerManager.Subscribe(ctx) - defer sourceSub.Close() targetSub := n2.PeerManager.Subscribe(ctx) - defer targetSub.Close() sourceAddress := n1.NodeAddress r.logger.Debug("source address", "address", sourceAddress) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 8f751ec6a..87842bee6 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -758,7 +758,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // they are closed elsewhere it will cause this method to shut down and return. 
func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels channelIDs) { r.metrics.Peers.Add(1) - r.peerManager.Ready(peerID) + r.peerManager.Ready(ctx, peerID) sendQueue := r.getOrMakeQueue(peerID, channels) defer func() { @@ -769,7 +769,7 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec sendQueue.close() - r.peerManager.Disconnected(peerID) + r.peerManager.Disconnected(ctx, peerID) r.metrics.Peers.Add(-1) }() diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index a561f68cd..1a59a0239 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -409,7 +409,6 @@ func TestRouter_AcceptPeers(t *testing.T) { require.NoError(t, err) sub := peerManager.Subscribe(ctx) - defer sub.Close() router, err := p2p.NewRouter( ctx, @@ -433,7 +432,6 @@ func TestRouter_AcceptPeers(t *testing.T) { // force a context switch so that the // connection is handled. time.Sleep(time.Millisecond) - sub.Close() } else { select { case <-closer.Done(): @@ -659,7 +657,6 @@ func TestRouter_DialPeers(t *testing.T) { require.NoError(t, err) require.True(t, added) sub := peerManager.Subscribe(ctx) - defer sub.Close() router, err := p2p.NewRouter( ctx, @@ -683,7 +680,6 @@ func TestRouter_DialPeers(t *testing.T) { // force a context switch so that the // connection is handled. 
time.Sleep(time.Millisecond) - sub.Close() } else { select { case <-closer.Done(): @@ -822,7 +818,6 @@ func TestRouter_EvictPeers(t *testing.T) { require.NoError(t, err) sub := peerManager.Subscribe(ctx) - defer sub.Close() router, err := p2p.NewRouter( ctx, @@ -850,7 +845,6 @@ func TestRouter_EvictPeers(t *testing.T) { NodeID: peerInfo.NodeID, Status: p2p.PeerStatusDown, }) - sub.Close() require.NoError(t, router.Stop()) mockTransport.AssertExpectations(t) @@ -943,7 +937,6 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { require.NoError(t, err) sub := peerManager.Subscribe(ctx) - defer sub.Close() router, err := p2p.NewRouter( ctx, diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index f6eac2a97..8cac68891 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -225,8 +225,6 @@ func (r *Reactor) OnStart(ctx context.Context) error { func (r *Reactor) OnStop() { // tell the dispatcher to stop sending any more requests r.dispatcher.Close() - - <-r.peerUpdates.Done() } // Sync runs a state sync, fetching snapshots and providing chunks to the @@ -865,8 +863,6 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. func (r *Reactor) processPeerUpdates(ctx context.Context) { - defer r.peerUpdates.Close() - for { select { case <-ctx.Done(): From cb88bd39414fe37add21e8d9019b85ebb7cbd40f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 8 Dec 2021 09:05:01 -0500 Subject: [PATCH 10/33] p2p: migrate to use new interface for channel errors (#7403) * p2p: migrate to use new interface for channel errors * Update internal/p2p/p2ptest/require.go Co-authored-by: M. J. Fromberger * rename * feedback Co-authored-by: M. J. 
Fromberger --- internal/blocksync/reactor.go | 16 ++++++++++++---- internal/consensus/reactor.go | 16 ++++++++++++---- internal/evidence/reactor.go | 4 +++- internal/mempool/reactor.go | 4 +++- internal/p2p/channel.go | 6 +++--- internal/p2p/channel_test.go | 2 +- internal/p2p/p2ptest/require.go | 16 ++++++++++------ internal/p2p/pex/reactor.go | 4 +++- internal/p2p/router_test.go | 6 +++--- internal/statesync/reactor.go | 18 +++++++++++------- 10 files changed, 61 insertions(+), 31 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 6a5620f4e..2f93a3cf3 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -292,9 +292,11 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) { case envelope := <-r.blockSyncCh.In: if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } case envelope := <-r.blockSyncOutBridgeCh: @@ -381,9 +383,11 @@ func (r *Reactor) requestRoutine(ctx context.Context) { Message: &bcproto.BlockRequest{Height: request.Height}, } case pErr := <-r.errorsCh: - r.blockSyncCh.Error <- p2p.PeerError{ + if err := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, + }); err != nil { + return } case <-statusUpdateTicker.C: r.poolWG.Add(1) @@ -523,16 +527,20 @@ FOR_LOOP: // NOTE: We've already removed the peer's request, but we still need // to clean up the rest. 
peerID := r.pool.RedoRequest(first.Height) - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID, Err: err, + }); serr != nil { + break FOR_LOOP } peerID2 := r.pool.RedoRequest(second.Height) if peerID2 != peerID { - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID2, Err: err, + }); serr != nil { + break FOR_LOOP } } diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 8f5cdd0b1..5e2a6b535 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1322,9 +1322,11 @@ func (r *Reactor) processStateCh(ctx context.Context) { case envelope := <-r.stateCh.In: if err := r.handleMessage(r.stateCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) - r.stateCh.Error <- p2p.PeerError{ + if serr := r.stateCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } @@ -1345,9 +1347,11 @@ func (r *Reactor) processDataCh(ctx context.Context) { case envelope := <-r.dataCh.In: if err := r.handleMessage(r.dataCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) - r.dataCh.Error <- p2p.PeerError{ + if serr := r.dataCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } @@ -1368,9 +1372,11 @@ func (r *Reactor) processVoteCh(ctx context.Context) { case envelope := <-r.voteCh.In: if err := r.handleMessage(r.voteCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) - r.voteCh.Error <- p2p.PeerError{ + if serr := r.voteCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } @@ -1391,9 +1397,11 @@ func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { case 
envelope := <-r.voteSetBitsCh.In: if err := r.handleMessage(r.voteSetBitsCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) - r.voteSetBitsCh.Error <- p2p.PeerError{ + if serr := r.voteSetBitsCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 31e927ba7..29712581c 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -182,9 +182,11 @@ func (r *Reactor) processEvidenceCh(ctx context.Context) { case envelope := <-r.evidenceCh.In: if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) - r.evidenceCh.Error <- p2p.PeerError{ + if serr := r.evidenceCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 43215f5f8..19d857614 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -205,9 +205,11 @@ func (r *Reactor) processMempoolCh(ctx context.Context) { case envelope := <-r.mempoolCh.In: if err := r.handleMessage(ctx, r.mempoolCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ + if serr := r.mempoolCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } case <-ctx.Done(): diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 2143589fd..9296ca15e 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -64,7 +64,7 @@ type Channel struct { ID ChannelID In <-chan Envelope // inbound messages (peers to reactors) Out chan<- Envelope // outbound messages (reactors to peers) - Error chan<- 
PeerError // peer error reporting + errCh chan<- PeerError // peer error reporting messageType proto.Message // the channel's message type, used for unmarshaling } @@ -83,7 +83,7 @@ func NewChannel( messageType: messageType, In: inCh, Out: outCh, - Error: errCh, + errCh: errCh, } } @@ -104,7 +104,7 @@ func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { select { case <-ctx.Done(): return ctx.Err() - case ch.Error <- pe: + case ch.errCh <- pe: return nil } } diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go index 0e2d7ea7c..4b2ce5937 100644 --- a/internal/p2p/channel_test.go +++ b/internal/p2p/channel_test.go @@ -25,7 +25,7 @@ func testChannel(size int) (*channelInternal, *Channel) { ch := &Channel{ In: in.In, Out: in.Out, - Error: in.Error, + errCh: in.Error, } return in, ch } diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index ce44a724c..b55d6a51f 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -2,6 +2,7 @@ package p2ptest import ( "context" + "errors" "testing" "time" @@ -100,13 +101,16 @@ func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUp } // RequireError requires that the given peer error is submitted for a peer. 
-func RequireError(t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Error <- peerError: - case <-timer.C: +func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { + tctx, tcancel := context.WithTimeout(ctx, time.Second) + defer tcancel() + + err := channel.SendError(tctx, peerError) + switch { + case errors.Is(err, context.DeadlineExceeded): require.Fail(t, "timed out reporting error", "%v on %v", peerError, channel.ID) + default: + require.NoError(t, err, "unexpected error") } } diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 6970c6fef..24aeec05f 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -172,9 +172,11 @@ func (r *Reactor) processPexCh(ctx context.Context) { case envelope := <-r.pexCh.In: if err := r.handleMessage(r.pexCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) - r.pexCh.Error <- p2p.PeerError{ + if serr := r.pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 1a59a0239..2974c1e88 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -87,10 +87,10 @@ func TestRouter_Network(t *testing.T) { // We then submit an error for a peer, and watch it get disconnected and // then reconnected as the router retries it. 
peerUpdates := local.MakePeerUpdatesNoRequireEmpty(ctx, t) - channel.Error <- p2p.PeerError{ + require.NoError(t, channel.SendError(ctx, p2p.PeerError{ NodeID: peers[0].NodeID, Err: errors.New("boom"), - } + })) p2ptest.RequireUpdates(t, peerUpdates, []p2p.PeerUpdate{ {NodeID: peers[0].NodeID, Status: p2p.PeerStatusDown}, {NodeID: peers[0].NodeID, Status: p2p.PeerStatusUp}, @@ -345,7 +345,7 @@ func TestRouter_Channel_Error(t *testing.T) { // Erroring b should cause it to be disconnected. It will reconnect shortly after. sub := network.Nodes[aID].MakePeerUpdates(ctx, t) - p2ptest.RequireError(t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) + p2ptest.RequireError(ctx, t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) p2ptest.RequireUpdates(t, sub, []p2p.PeerUpdate{ {NodeID: bID, Status: p2p.PeerStatusDown}, {NodeID: bID, Status: p2p.PeerStatusUp}, diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 8cac68891..61e3dec08 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -410,9 +410,11 @@ func (r *Reactor) backfill( r.logger.Info("backfill: fetched light block failed validate basic, removing peer...", "err", err, "height", height) queue.retry(height) - r.blockCh.Error <- p2p.PeerError{ + if serr := r.blockCh.SendError(ctx, p2p.PeerError{ NodeID: peer, Err: fmt.Errorf("received invalid light block: %w", err), + }); serr != nil { + return } continue } @@ -445,25 +447,25 @@ func (r *Reactor) backfill( if w, g := trustedBlockID.Hash, resp.block.Hash(); !bytes.Equal(w, g) { r.logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", "trustedHash", w, "receivedHash", g, "height", resp.block.Height) - r.blockCh.Error <- p2p.PeerError{ + if err := r.blockCh.SendError(ctx, p2p.PeerError{ NodeID: resp.peer, Err: fmt.Errorf("received invalid light block. 
Expected hash %v, got: %v", w, g), + }); err != nil { + return nil } queue.retry(resp.block.Height) continue } // save the signed headers - err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID) - if err != nil { + if err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID); err != nil { return err } // check if there has been a change in the validator set if lastValidatorSet != nil && !bytes.Equal(resp.block.Header.ValidatorsHash, resp.block.Header.NextValidatorsHash) { // save all the heights that the last validator set was the same - err = r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet) - if err != nil { + if err := r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet); err != nil { return err } @@ -810,9 +812,11 @@ func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) "channel", chName, "ch_id", ch.ID, "envelope", envelope) - ch.Error <- p2p.PeerError{ + if serr := ch.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } } From 9c21d4140bb563e412f1676db28781d1a725731b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 8 Dec 2021 09:29:13 -0500 Subject: [PATCH 11/33] mempool: avoid arbitrary background contexts (#7409) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- internal/consensus/replay_stubs.go | 11 ++++++----- internal/mempool/mempool.go | 10 +++++----- internal/mempool/mempool_test.go | 12 ++++++------ internal/mempool/mock/mempool.go | 11 ++++++----- internal/mempool/reactor_test.go | 4 ++-- internal/mempool/types.go | 3 ++- internal/state/execution.go | 6 ++++-- rpc/client/mocks/client.go | 14 -------------- 8 files changed, 31 insertions(+), 40 deletions(-) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 8672f8e1e..649e4387b 100644 --- a/internal/consensus/replay_stubs.go +++ 
b/internal/consensus/replay_stubs.go @@ -29,6 +29,7 @@ func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( + _ context.Context, _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, @@ -37,11 +38,11 @@ func (emptyMempool) Update( ) error { return nil } -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn() error { return nil } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) SizeBytes() int64 { return 0 } +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn(ctx context.Context) error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) TxsFront() *clist.CElement { return nil } func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index ec7ef2e15..f5d1c926d 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -175,8 +175,8 @@ func (txmp *TxMempool) SizeBytes() int64 { // FlushAppConn executes FlushSync on the mempool's proxyAppConn. // // NOTE: The caller must obtain a write-lock prior to execution. -func (txmp *TxMempool) FlushAppConn() error { - return txmp.proxyAppConn.FlushSync(context.Background()) +func (txmp *TxMempool) FlushAppConn(ctx context.Context) error { + return txmp.proxyAppConn.FlushSync(ctx) } // WaitForNextTx returns a blocking channel that will be closed when the next @@ -428,6 +428,7 @@ func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { // NOTE: // - The caller must explicitly acquire a write-lock. 
func (txmp *TxMempool) Update( + ctx context.Context, blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, @@ -472,7 +473,7 @@ func (txmp *TxMempool) Update( "num_txs", txmp.Size(), "height", blockHeight, ) - txmp.updateReCheckTxs() + txmp.updateReCheckTxs(ctx) } else { txmp.notifyTxsAvailable() } @@ -713,14 +714,13 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) // // NOTE: // - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs() { +func (txmp *TxMempool) updateReCheckTxs(ctx context.Context) { if txmp.Size() == 0 { panic("attempted to update re-CheckTx txs when mempool is empty") } txmp.recheckCursor = txmp.gossipIndex.Front() txmp.recheckEnd = txmp.gossipIndex.Back() - ctx := context.Background() for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { wtx := e.Value.(*WrappedTx) diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 1613dce98..a0dc658a4 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -179,7 +179,7 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // commit half the transactions and ensure we fire an event txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() ensureTxFire() ensureNoTxFire() @@ -210,7 +210,7 @@ func TestTxMempool_Size(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() require.Equal(t, len(rawTxs)/2, txmp.Size()) @@ -237,7 +237,7 @@ func TestTxMempool_Flush(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() txmp.Flush() @@ -460,7 +460,7 @@ 
func TestTxMempool_ConcurrentTxs(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil)) txmp.Unlock() height++ @@ -500,7 +500,7 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil)) txmp.Unlock() require.Equal(t, 95, txmp.Size()) @@ -526,7 +526,7 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil)) txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 8344220a0..2b32a7ce6 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -24,6 +24,7 @@ func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (Mempool) Update( + _ context.Context, _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, @@ -32,11 +33,11 @@ func (Mempool) Update( ) error { return nil } -func (Mempool) Flush() {} -func (Mempool) FlushAppConn() error { return nil } -func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (Mempool) EnableTxsAvailable() {} -func (Mempool) SizeBytes() int64 { return 0 } +func (Mempool) Flush() {} +func (Mempool) FlushAppConn(ctx context.Context) error { return nil } +func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (Mempool) EnableTxsAvailable() {} +func (Mempool) SizeBytes() int64 { return 0 } func (Mempool) TxsFront() *clist.CElement { 
return nil } func (Mempool) TxsWaitChan() <-chan struct{} { return nil } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 096544910..f75809744 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -249,7 +249,7 @@ func TestReactorConcurrency(t *testing.T) { deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} } - require.NoError(t, mempool.Update(1, convertTex(txs), deliverTxResponses, nil, nil)) + require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) }() // 1. submit a bunch of txs @@ -263,7 +263,7 @@ func TestReactorConcurrency(t *testing.T) { mempool.Lock() defer mempool.Unlock() - err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) require.NoError(t, err) }() diff --git a/internal/mempool/types.go b/internal/mempool/types.go index 6e3955dc3..05d4ba3e3 100644 --- a/internal/mempool/types.go +++ b/internal/mempool/types.go @@ -63,6 +63,7 @@ type Mempool interface { // 1. This should be called *after* block is committed by consensus. // 2. Lock/Unlock must be managed by the caller. Update( + ctx context.Context, blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, @@ -75,7 +76,7 @@ type Mempool interface { // // NOTE: // 1. Lock/Unlock must be managed by caller. - FlushAppConn() error + FlushAppConn(context.Context) error // Flush removes all transactions from the mempool and caches. Flush() diff --git a/internal/state/execution.go b/internal/state/execution.go index 85e96b017..9d385c956 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -202,7 +202,7 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Lock mempool, commit app state, update mempoool. 
- appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.DeliverTxs) if err != nil { return state, fmt.Errorf("commit failed for application: %v", err) } @@ -247,6 +247,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // typically reset on Commit and old txs must be replayed against committed // state before new txs are run in the mempool, lest they be invalid. func (blockExec *BlockExecutor) Commit( + ctx context.Context, state State, block *types.Block, deliverTxResponses []*abci.ResponseDeliverTx, @@ -256,7 +257,7 @@ func (blockExec *BlockExecutor) Commit( // while mempool is Locked, flush to ensure all async requests have completed // in the ABCI app before Commit. - err := blockExec.mempool.FlushAppConn() + err := blockExec.mempool.FlushAppConn(ctx) if err != nil { blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err) return nil, 0, err @@ -279,6 +280,7 @@ func (blockExec *BlockExecutor) Commit( // Update mempool. 
err = blockExec.mempool.Update( + ctx, block.Height, block.Txs, deliverTxResponses, diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 3c3ebd443..e638980a8 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -637,20 +637,6 @@ func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { return r0, r1 } -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) From 867d406c6c200694f73cc4a9c2aa0532e29bbb14 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 8 Dec 2021 11:09:08 -0500 Subject: [PATCH 12/33] state: pass connected context (#7410) --- internal/state/execution.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/state/execution.go b/internal/state/execution.go index 9d385c956..627a6770c 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -264,7 +264,7 @@ func (blockExec *BlockExecutor) Commit( } // Commit block, get hash back - res, err := blockExec.proxyApp.CommitSync(context.Background()) + res, err := blockExec.proxyApp.CommitSync(ctx) if err != nil { blockExec.logger.Error("client error during proxyAppConn.CommitSync", "err", err) return nil, 0, err @@ -609,7 +609,7 @@ func ExecCommitBlock( } // Commit block, get hash back - res, err := appConnConsensus.CommitSync(context.Background()) + res, err := appConnConsensus.CommitSync(ctx) if err != nil { logger.Error("client error during proxyAppConn.CommitSync", "err", res) return nil, err From 358fc5f6c4330d311daa176278715d51aaa5f719 
Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Dec 2021 16:21:45 +0000 Subject: [PATCH 13/33] build(deps): Bump github.com/adlio/schema from 1.1.14 to 1.1.15 (#7407) Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.14 to 1.1.15.
Commits
  • 3b57e35 Security patch: Update upstream runc dependency to 1.0.3.
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/adlio/schema&package-manager=go_modules&previous-version=1.1.14&new-version=1.1.15)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 2725c3df9..01b2930c8 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/BurntSushi/toml v0.4.1 - github.com/adlio/schema v1.1.14 + github.com/adlio/schema v1.1.15 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect diff --git a/go.sum b/go.sum index 9b411330e..5aaff4e4b 100644 --- a/go.sum +++ b/go.sum @@ -88,8 +88,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU= -github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg= +github.com/adlio/schema v1.1.15 h1:ap+yp+RFcfDs1Eq1D89LX4KR/UDqxjRnLRGuwsxGyOo= +github.com/adlio/schema v1.1.15/go.mod h1:ThQUeMpGSGpfzeElY/f3wW1S7jIgnYAQ+5vON7w1T4o= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -761,8 +761,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= From f79b77036f6da66db06a866e4e01e17ce749808e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 9 Dec 2021 08:15:57 -0800 Subject: [PATCH 14/33] Fix link to Terraform/Ansible documentation. (#7416) --- README.md | 10 +++++----- networks/remote/README.md | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 7823d45c1..4082752ad 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead. -Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs. +Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs. See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, we're happy to help. 
You can @@ -40,7 +40,7 @@ More on how releases are conducted can be found [here](./RELEASES.md). ## Security To report a security vulnerability, see our [bug bounty -program](https://hackerone.com/cosmos). +program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md). We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list @@ -64,7 +64,7 @@ See the [install instructions](/docs/introduction/install.md). - [Single node](/docs/introduction/quick-start.md) - [Local cluster using docker-compose](/docs/networks/docker-compose.md) -- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md) +- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md) - [Join the Cosmos testnet](https://cosmos.network/testnet) ## Contributing @@ -73,7 +73,7 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the -[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md), +[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md), and familiarize yourself with our [Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). @@ -97,7 +97,7 @@ In an effort to avoid accumulating technical debt prior to 1.0.0, we do not guarantee that breaking changes (ie. bumps in the MINOR version) will work with existing Tendermint blockchains. In these cases you will have to start a new blockchain, or write something custom to get the old -data into the new chain. However, any bump in the PATCH version should be +data into the new chain. 
However, any bump in the PATCH version should be compatible with existing blockchain histories. diff --git a/networks/remote/README.md b/networks/remote/README.md index 8f2e04736..eab906e45 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](https://docs.tendermint.com/master/networks/terraform-and-ansible.html). +See the [docs](https://docs.tendermint.com/master/tools/terraform-and-ansible.html). From bd6dc3ca8858446745240aadab32f1bce5c15f84 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 9 Dec 2021 14:03:41 -0500 Subject: [PATCH 15/33] p2p: refactor channel Send/out (#7414) --- internal/blocksync/reactor.go | 40 +++-- internal/consensus/byzantine_test.go | 26 +-- internal/consensus/invalid_test.go | 5 +- internal/consensus/reactor.go | 231 ++++++++++++++++---------- internal/consensus/replay_test.go | 8 +- internal/consensus/state.go | 67 +++++--- internal/consensus/state_test.go | 26 +-- internal/evidence/reactor.go | 8 +- internal/evidence/reactor_test.go | 21 +-- internal/mempool/reactor.go | 8 +- internal/mempool/reactor_test.go | 33 +--- internal/p2p/channel.go | 6 +- internal/p2p/channel_test.go | 2 +- internal/p2p/p2ptest/require.go | 20 ++- internal/p2p/pex/reactor.go | 20 ++- internal/p2p/pex/reactor_test.go | 27 +-- internal/p2p/router_test.go | 43 ++--- internal/statesync/dispatcher.go | 11 +- internal/statesync/dispatcher_test.go | 63 +++++-- internal/statesync/reactor.go | 66 ++++---- internal/statesync/reactor_test.go | 4 +- internal/statesync/stateprovider.go | 12 +- internal/statesync/syncer.go | 54 +++--- internal/statesync/syncer_test.go | 14 +- libs/events/event_cache.go | 6 +- libs/events/event_cache_test.go | 12 +- libs/events/events.go | 15 +- libs/events/events_test.go | 231 ++++++++++++++++++-------- 28 files changed, 626 insertions(+), 453 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 
2f93a3cf3..53a63fb84 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -2,6 +2,7 @@ package blocksync import ( "context" + "errors" "fmt" "runtime/debug" "sync" @@ -185,40 +186,38 @@ func (r *Reactor) OnStop() { // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. -func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) { +func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID) error { block := r.store.LoadBlock(msg.Height) if block != nil { blockProto, err := block.ToProto() if err != nil { r.logger.Error("failed to convert msg to protobuf", "err", err) - return + return err } - r.blockSyncCh.Out <- p2p.Envelope{ + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{Block: blockProto}, - } - - return + }) } r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) - r.blockSyncCh.Out <- p2p.Envelope{ + + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.NoBlockResponse{Height: msg.Height}, - } + }) } // handleBlockSyncMessage handles envelopes sent from peers on the // BlockSyncChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
-func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *bcproto.BlockRequest: - r.respondToPeer(msg, envelope.From) - + return r.respondToPeer(ctx, msg, envelope.From) case *bcproto.BlockResponse: block, err := types.BlockFromProto(msg.Block) if err != nil { @@ -229,14 +228,13 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { r.pool.AddBlock(envelope.From, block, block.Size()) case *bcproto.StatusRequest: - r.blockSyncCh.Out <- p2p.Envelope{ + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &bcproto.StatusResponse{ Height: r.store.Height(), Base: r.store.Base(), }, - } - + }) case *bcproto.StatusResponse: r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height) @@ -253,7 +251,7 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -269,7 +267,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err switch chID { case BlockSyncChannel: - err = r.handleBlockSyncMessage(envelope) + err = r.handleBlockSyncMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -290,7 +288,11 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) { r.logger.Debug("stopped listening on block sync channel; closing...") return case envelope := <-r.blockSyncCh.In: - if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.blockSyncCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, @@ -300,7 +302,9 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) { } } case envelope := <-r.blockSyncOutBridgeCh: - r.blockSyncCh.Out <- envelope + if err := r.blockSyncCh.Send(ctx, envelope); err != nil { + return + } } } } diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 9526f4ae1..3133e3659 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -141,20 +141,22 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for _, ps := range bzReactor.peers { if i < len(bzReactor.peers)/2 { bzNodeState.logger.Info("signed and pushed vote", "vote", prevote1, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ 
- Vote: prevote1.ToProto(), - }, - } + require.NoError(t, bzReactor.voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote1.ToProto(), + }, + })) } else { bzNodeState.logger.Info("signed and pushed vote", "vote", prevote2, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: prevote2.ToProto(), - }, - } + require.NoError(t, bzReactor.voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote2.ToProto(), + }, + })) } i++ diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 0c0528d6f..1b3636f02 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -124,13 +124,12 @@ func invalidDoPrevoteFunc( for _, ps := range r.peers { cs.logger.Info("sending bad vote", "block", blockHash, "peer", ps.peerID) - - r.voteCh.Out <- p2p.Envelope{ + require.NoError(t, r.voteCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.Vote{ Vote: precommit.ToProto(), }, - } + })) } }() } diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 5e2a6b535..88a831ede 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -317,16 +317,16 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { return ps, ok } -func (r *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { - r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState) error { + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: makeRoundStepMessage(rs), - } + }) } -func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { +func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState) error { psHeader := rs.ProposalBlockParts.Header() - r.stateCh.Out <- p2p.Envelope{ + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, 
Message: &tmcons.NewValidBlock{ Height: rs.Height, @@ -335,11 +335,11 @@ func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), IsCommit: rs.Step == cstypes.RoundStepCommit, }, - } + }) } -func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) { - r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote) error { + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &tmcons.HasVote{ Height: vote.Height, @@ -347,7 +347,7 @@ func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) { Type: vote.Type, Index: vote.ValidatorIndex, }, - } + }) } // subscribeToBroadcastEvents subscribes for new round steps and votes using the @@ -357,11 +357,17 @@ func (r *Reactor) subscribeToBroadcastEvents() { err := r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventNewRoundStepValue, - func(data tmevents.EventData) { - r.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + func(ctx context.Context, data tmevents.EventData) error { + if err := r.broadcastNewRoundStepMessage(ctx, data.(*cstypes.RoundState)); err != nil { + return err + } select { case r.state.onStopCh <- data.(*cstypes.RoundState): + return nil + case <-ctx.Done(): + return ctx.Err() default: + return nil } }, ) @@ -372,8 +378,8 @@ func (r *Reactor) subscribeToBroadcastEvents() { err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventValidBlockValue, - func(data tmevents.EventData) { - r.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) + func(ctx context.Context, data tmevents.EventData) error { + return r.broadcastNewValidBlockMessage(ctx, data.(*cstypes.RoundState)) }, ) if err != nil { @@ -383,8 +389,8 @@ func (r *Reactor) subscribeToBroadcastEvents() { err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventVoteValue, - func(data tmevents.EventData) { - r.broadcastHasVoteMessage(data.(*types.Vote)) + 
func(ctx context.Context, data tmevents.EventData) error { + return r.broadcastHasVoteMessage(ctx, data.(*types.Vote)) }, ) if err != nil { @@ -406,19 +412,14 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep { } } -func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID) { - rs := r.state.GetRoundState() - msg := makeRoundStepMessage(rs) - select { - case <-ctx.Done(): - case r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID) error { + return r.stateCh.Send(ctx, p2p.Envelope{ To: peerID, - Message: msg, - }: - } + Message: makeRoundStepMessage(r.state.GetRoundState()), + }) } -func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) { +func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { @@ -467,14 +468,14 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer } logger.Debug("sending block part for catchup", "round", prs.Round, "index", index) - r.dataCh.Out <- p2p.Envelope{ + _ = r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.BlockPart{ Height: prs.Height, // not our height, so it does not matter. 
Round: prs.Round, // not our height, so it does not matter Part: *partProto, }, - } + }) return } @@ -521,13 +522,15 @@ OUTER_LOOP: } logger.Debug("sending block part", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.BlockPart{ Height: rs.Height, // this tells peer that this part applies to us Round: rs.Round, // this tells peer that this part applies to us Part: *partProto, }, + }); err != nil { + return } ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) @@ -566,7 +569,7 @@ OUTER_LOOP: continue OUTER_LOOP } - r.gossipDataForCatchup(rs, prs, ps) + r.gossipDataForCatchup(ctx, rs, prs, ps) continue OUTER_LOOP } @@ -593,11 +596,13 @@ OUTER_LOOP: propProto := rs.Proposal.ToProto() logger.Debug("sending proposal", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.Proposal{ Proposal: *propProto, }, + }); err != nil { + return } // NOTE: A peer might have received a different proposal message, so @@ -614,13 +619,15 @@ OUTER_LOOP: pPolProto := pPol.ToProto() logger.Debug("sending POL", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.ProposalPOL{ Height: rs.Height, ProposalPolRound: rs.Proposal.POLRound, ProposalPol: *pPolProto, }, + }); err != nil { + return } } @@ -640,24 +647,24 @@ OUTER_LOOP: // pickSendVote picks a vote and sends it to the peer. It will return true if // there is a vote to send and false otherwise. 
-func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader) bool { - if vote, ok := ps.PickVoteToSend(votes); ok { - r.logger.Debug("sending vote message", "ps", ps, "vote", vote) - select { - case <-ctx.Done(): - case r.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: vote.ToProto(), - }, - }: - } - - ps.SetHasVote(vote) - return true +func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader) (bool, error) { + vote, ok := ps.PickVoteToSend(votes) + if !ok { + return false, nil } - return false + r.logger.Debug("sending vote message", "ps", ps, "vote", vote) + if err := r.voteCh.Send(ctx, p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: vote.ToProto(), + }, + }); err != nil { + return false, err + } + + ps.SetHasVote(vote) + return true, nil } func (r *Reactor) gossipVotesForHeight( @@ -665,62 +672,75 @@ func (r *Reactor) gossipVotesForHeight( rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, -) bool { +) (bool, error) { logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) // if there are lastCommits to send... if prs.Step == cstypes.RoundStepNewHeight { - if r.pickSendVote(ctx, ps, rs.LastCommit) { + if ok, err := r.pickSendVote(ctx, ps, rs.LastCommit); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.LastCommit to send") - return true + return true, nil + } } // if there are POL prevotes to send... 
if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ctx, ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } // if there are prevotes to send... if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are precommits to send... if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ctx, ps, rs.Votes.Precommits(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Precommits(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Precommits(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are prevotes to send...(which are needed because of validBlock mechanism) if prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are POLPrevotes to send... 
if prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ctx, ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } - return false + return false, nil } func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState) { @@ -763,14 +783,18 @@ OUTER_LOOP: // if height matches, then send LastCommit, Prevotes, and Precommits if rs.Height == prs.Height { - if r.gossipVotesForHeight(ctx, rs, prs, ps) { + if ok, err := r.gossipVotesForHeight(ctx, rs, prs, ps); err != nil { + return + } else if ok { continue OUTER_LOOP } } // special catchup logic -- if peer is lagging by height 1, send LastCommit if prs.Height != 0 && rs.Height == prs.Height+1 { - if r.pickSendVote(ctx, ps, rs.LastCommit) { + if ok, err := r.pickSendVote(ctx, ps, rs.LastCommit); err != nil { + return + } else if ok { logger.Debug("picked rs.LastCommit to send", "height", prs.Height) continue OUTER_LOOP } @@ -782,7 +806,9 @@ OUTER_LOOP: // Load the block commit for prs.Height, which contains precommit // signatures for prs.Height. 
if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil { - if r.pickSendVote(ctx, ps, commit) { + if ok, err := r.pickSendVote(ctx, ps, commit); err != nil { + return + } else if ok { logger.Debug("picked Catchup commit to send", "height", prs.Height) continue OUTER_LOOP } @@ -844,7 +870,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -852,6 +878,8 @@ OUTER_LOOP: Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) @@ -871,7 +899,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -879,6 +907,8 @@ OUTER_LOOP: Type: tmproto.PrecommitType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } select { @@ -898,7 +928,7 @@ OUTER_LOOP: if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -906,6 +936,8 @@ OUTER_LOOP: Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) @@ -928,7 +960,7 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= r.state.blockStore.Height() && prs.Height >= r.state.blockStore.Base() { if commit := r.state.LoadCommit(prs.Height); commit != nil { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: 
&tmcons.VoteSetMaj23{ Height: prs.Height, @@ -936,6 +968,8 @@ OUTER_LOOP: Type: tmproto.PrecommitType, BlockID: commit.BlockID.ToProto(), }, + }); err != nil { + return } timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) @@ -1006,7 +1040,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // Send our state to the peer. If we're block-syncing, broadcast a // RoundStepMessage later upon SwitchToConsensus(). if !r.waitSync { - go r.sendNewRoundStepMessage(ctx, ps.peerID) + go func() { _ = r.sendNewRoundStepMessage(ctx, ps.peerID) }() } } @@ -1036,7 +1070,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") @@ -1104,9 +1138,11 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error eMsg.Votes = *votesProto } - r.voteSetBitsCh.Out <- p2p.Envelope{ + if err := r.voteSetBitsCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: eMsg, + }); err != nil { + return err } default: @@ -1120,7 +1156,7 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleDataMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "DataChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1139,17 +1175,24 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { pMsg := msgI.(*ProposalMessage) ps.SetHasProposal(pMsg.Proposal) - r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From} - + select { + case <-ctx.Done(): + return ctx.Err() + case r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From}: + } case *tmcons.ProposalPOL: ps.ApplyProposalPOLMessage(msgI.(*ProposalPOLMessage)) - case *tmcons.BlockPart: bpMsg := msgI.(*BlockPartMessage) ps.SetHasProposalBlockPart(bpMsg.Height, bpMsg.Round, int(bpMsg.Part.Index)) r.Metrics.BlockParts.With("peer_id", string(envelope.From)).Add(1) - r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From} + select { + case r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From}: + return nil + case <-ctx.Done(): + return ctx.Err() + } default: return fmt.Errorf("received unknown message on DataChannel: %T", msg) @@ -1162,7 +1205,7 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleVoteMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "VoteChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1188,20 +1231,22 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(vMsg.Vote) - r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From} - + select { + case r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From}: + return nil + case <-ctx.Done(): + return ctx.Err() + } default: return fmt.Errorf("received unknown message on VoteChannel: %T", msg) } - - return nil } // handleVoteSetBitsMessage handles envelopes sent from peers on the // VoteSetBitsChannel. If we fail to find the peer state for the envelope sender, // we perform a no-op and return. This can happen when we process the envelope // after the peer is removed. -func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1259,7 +1304,7 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) // the p2p channel. // // NOTE: We block on consensus state for proposals, block parts, and votes. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -1290,16 +1335,16 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err switch chID { case StateChannel: - err = r.handleStateMessage(envelope, msgI) + err = r.handleStateMessage(ctx, envelope, msgI) case DataChannel: - err = r.handleDataMessage(envelope, msgI) + err = r.handleDataMessage(ctx, envelope, msgI) case VoteChannel: - err = r.handleVoteMessage(envelope, msgI) + err = r.handleVoteMessage(ctx, envelope, msgI) case VoteSetBitsChannel: - err = r.handleVoteSetBitsMessage(envelope, msgI) + err = r.handleVoteSetBitsMessage(ctx, envelope, msgI) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -1320,7 +1365,7 @@ func (r *Reactor) processStateCh(ctx context.Context) { r.logger.Debug("stopped listening on StateChannel; closing...") return case envelope := <-r.stateCh.In: - if err := r.handleMessage(r.stateCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.stateCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) if serr := r.stateCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, @@ -1345,7 +1390,7 @@ func (r *Reactor) processDataCh(ctx context.Context) { r.logger.Debug("stopped listening on DataChannel; closing...") return case envelope := <-r.dataCh.In: - if err := r.handleMessage(r.dataCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.dataCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) if serr := r.dataCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, @@ -1370,7 +1415,7 @@ func (r *Reactor) 
processVoteCh(ctx context.Context) { r.logger.Debug("stopped listening on VoteChannel; closing...") return case envelope := <-r.voteCh.In: - if err := r.handleMessage(r.voteCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.voteCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) if serr := r.voteCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, @@ -1395,7 +1440,11 @@ func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { r.logger.Debug("stopped listening on VoteSetBitsChannel; closing...") return case envelope := <-r.voteSetBitsCh.In: - if err := r.handleMessage(r.voteSetBitsCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.voteSetBitsCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + r.logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) if serr := r.voteSetBitsCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 036614b71..56a4924cd 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -391,7 +391,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) @@ -423,7 +423,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := 
css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) @@ -482,7 +482,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) @@ -545,7 +545,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 02ab2ae54..051b7afba 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -511,58 +511,85 @@ func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) { // TODO: should these return anything or let callers just use events? // AddVote inputs a vote. -func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (cs *State) AddVote(ctx context.Context, vote *types.Vote, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID}: + return nil + } } // TODO: wait for event?! 
- return false, nil } // SetProposal inputs a proposal. -func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error { +func (cs *State) SetProposal(ctx context.Context, proposal *types.Proposal, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID}: + return nil + } } // TODO: wait for event?! - return nil } // AddProposalBlockPart inputs a part of the proposal block. -func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error { - +func (cs *State) AddProposalBlockPart(ctx context.Context, height int64, round int32, part *types.Part, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID}: + return nil + } } // TODO: wait for event?! - return nil } // SetProposalAndBlock inputs the proposal and all block parts. 
func (cs *State) SetProposalAndBlock( + ctx context.Context, proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID types.NodeID, ) error { - if err := cs.SetProposal(proposal, peerID); err != nil { + if err := cs.SetProposal(ctx, proposal, peerID); err != nil { return err } for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) - if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil { + if err := cs.AddProposalBlockPart(ctx, proposal.Height, proposal.Round, part, peerID); err != nil { return err } } @@ -761,7 +788,7 @@ func (cs *State) newStep(ctx context.Context) { cs.logger.Error("failed publishing new round step", "err", err) } - cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState) + cs.evsw.FireEvent(ctx, types.EventNewRoundStepValue, &cs.RoundState) } } @@ -1607,7 +1634,7 @@ func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int3 logger.Error("failed publishing valid block", "err", err) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) + cs.evsw.FireEvent(ctx, types.EventValidBlockValue, &cs.RoundState) } } } @@ -2075,7 +2102,7 @@ func (cs *State) addVote( return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(ctx, types.EventVoteValue, vote) // if we can skip timeoutCommit and have all the votes now, if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { @@ -2104,7 +2131,7 @@ func (cs *State) addVote( if err := cs.eventBus.PublishEventVote(ctx, types.EventDataVote{Vote: vote}); err != nil { return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(ctx, types.EventVoteValue, vote) switch vote.Type { case tmproto.PrevoteType: @@ -2158,7 +2185,7 @@ func (cs *State) addVote( cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) + cs.evsw.FireEvent(ctx, types.EventValidBlockValue, 
&cs.RoundState) if err := cs.eventBus.PublishEventValidBlock(ctx, cs.RoundStateEvent()); err != nil { return added, err } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 5d09908aa..387650704 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -251,7 +251,7 @@ func TestStateBadProposal(t *testing.T) { proposal.Signature = p.Signature // set the proposal block - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -314,7 +314,7 @@ func TestStateOversizedBlock(t *testing.T) { totalBytes += len(part.Bytes) } - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -621,7 +621,7 @@ func TestStateLockNoPOL(t *testing.T) { // now we're on a new round and not the proposer // so set the proposal block - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { t.Fatal(err) } @@ -723,7 +723,7 @@ func TestStateLockPOLRelock(t *testing.T) { round++ // moving to the next round //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -828,7 +828,7 @@ func TestStateLockPOLUnlock(t *testing.T) { cs1 unlocks! */ //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
- if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -940,7 +940,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { // we should have unlocked and locked on the new block, sending a precommit for this new block validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - if err := cs1.SetProposalAndBlock(prop, propBlock, secondBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, secondBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -971,7 +971,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { Round2 (vs3, C) // C C C C // C nil nil nil) */ - if err := cs1.SetProposalAndBlock(prop, propBlock, thirdPropBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, thirdPropBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1048,7 +1048,7 @@ func TestStateLockPOLSafety1(t *testing.T) { ensureNewRound(newRoundCh, height, round) //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
- if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } /*Round2 @@ -1160,7 +1160,7 @@ func TestStateLockPOLSafety2(t *testing.T) { startTestRound(ctx, cs1, height, round) ensureNewRound(newRoundCh, height, round) - if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop1, propBlock1, propBlockParts1, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height, round) @@ -1193,7 +1193,7 @@ func TestStateLockPOLSafety2(t *testing.T) { newProp.Signature = p.Signature - if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, newProp, propBlock0, propBlockParts0, "some peer"); err != nil { t.Fatal(err) } @@ -1428,7 +1428,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { ensurePrecommit(voteCh, height, round) validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1658,7 +1658,7 @@ func TestCommitFromPreviousRound(t *testing.T) { assert.True(t, rs.ProposalBlock == nil) assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1797,7 +1797,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { prop, propBlock := decideProposal(ctx, cs1, vs2, height+1, 0) propBlockParts := propBlock.MakePartSet(partSize) - if err := cs1.SetProposalAndBlock(prop, propBlock, 
propBlockParts, "some peer"); err != nil { + if err := cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } ensureNewProposal(proposalCh, height+1, 0) diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 29712581c..7302773ae 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -319,15 +319,13 @@ func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID // and thus would not be able to process the evidence correctly. Also, the // peer may receive this piece of evidence multiple times if it added and // removed frequently from the broadcasting peer. - select { - case <-ctx.Done(): - return - case r.evidenceCh.Out <- p2p.Envelope{ + if err := r.evidenceCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &tmproto.EvidenceList{ Evidence: []tmproto.Evidence{*evProto}, }, - }: + }); err != nil { + return } r.logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID) diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index df636ba66..156d47c6f 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -108,8 +108,8 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint } } - leaktest.Check(t) }) + t.Cleanup(leaktest.Check(t)) return rts } @@ -191,21 +191,6 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence wg.Wait() } -func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor #%s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor #%d did not stop", id) - - } - - for id, ech := range rts.evidenceChannels { - require.Empty(t, ech.Out, "checking channel #%q", id) - } -} - func createEvidenceList( t *testing.T, pool *evidence.Pool, @@ -325,8 +310,6 @@ func TestReactorBroadcastEvidence(t 
*testing.T) { for _, pool := range rts.pools { require.Equal(t, numEvidence, int(pool.Size())) } - - rts.assertEvidenceChannelsEmpty(t) } // TestReactorSelectiveBroadcast tests a context where we have two reactors @@ -367,8 +350,6 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { require.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size())) require.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size())) - - rts.assertEvidenceChannelsEmpty(t) } func TestReactorBroadcastEvidence_Pending(t *testing.T) { diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 19d857614..2e1a94f01 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -349,15 +349,15 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, c if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { // Send the mempool tx to the corresponding peer. Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. 
- select { - case r.mempoolCh.Out <- p2p.Envelope{ + if err := r.mempoolCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{memTx.tx}, }, - }: - case <-ctx.Done(): + }); err != nil { + return } + r.logger.Debug( "gossiped tx to peer", "tx", fmt.Sprintf("%X", memTx.tx.Hash()), diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index f75809744..e3f0b5718 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -109,14 +109,6 @@ func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { "network does not have expected number of nodes") } -func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { - t.Helper() - - for _, mch := range rts.mempoolChannels { - require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) - } -} - func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) { t.Helper() @@ -296,8 +288,6 @@ func TestReactorNoBroadcastToSender(t *testing.T) { require.Eventually(t, func() bool { return rts.mempools[secondary].Size() == 0 }, time.Minute, 100*time.Millisecond) - - rts.assertMempoolChannelsDrained(t) } func TestReactor_MaxTxBytes(t *testing.T) { @@ -334,8 +324,6 @@ func TestReactor_MaxTxBytes(t *testing.T) { tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) err = rts.mempools[primary].CheckTx(ctx, tx2, nil, TxInfo{SenderID: UnknownPeerID}) require.Error(t, err) - - rts.assertMempoolChannelsDrained(t) } func TestDontExhaustMaxActiveIDs(t *testing.T) { @@ -359,30 +347,13 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { NodeID: peerID, } - rts.mempoolChannels[nodeID].Out <- p2p.Envelope{ + require.NoError(t, rts.mempoolChannels[nodeID].Send(ctx, p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{}, }, - } + })) } - - require.Eventually( - t, - func() bool { - for _, mch := range rts.mempoolChannels { - if len(mch.Out) > 0 { - return false - } - } - - return true - }, - 
time.Minute, - 10*time.Millisecond, - ) - - rts.assertMempoolChannelsDrained(t) } func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 9296ca15e..da6955596 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -63,7 +63,7 @@ func (pe PeerError) Unwrap() error { return pe.Err } type Channel struct { ID ChannelID In <-chan Envelope // inbound messages (peers to reactors) - Out chan<- Envelope // outbound messages (reactors to peers) + outCh chan<- Envelope // outbound messages (reactors to peers) errCh chan<- PeerError // peer error reporting messageType proto.Message // the channel's message type, used for unmarshaling @@ -82,7 +82,7 @@ func NewChannel( ID: id, messageType: messageType, In: inCh, - Out: outCh, + outCh: outCh, errCh: errCh, } } @@ -93,7 +93,7 @@ func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { select { case <-ctx.Done(): return ctx.Err() - case ch.Out <- envelope: + case ch.outCh <- envelope: return nil } } diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go index 4b2ce5937..525eb18fb 100644 --- a/internal/p2p/channel_test.go +++ b/internal/p2p/channel_test.go @@ -24,7 +24,7 @@ func testChannel(size int) (*channelInternal, *Channel) { } ch := &Channel{ In: in.In, - Out: in.Out, + outCh: in.Out, errCh: in.Error, } return in, ch diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index b55d6a51f..22a1d2a81 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -64,26 +64,30 @@ func RequireReceiveUnordered(t *testing.T, channel *p2p.Channel, expect []p2p.En } // RequireSend requires that the given envelope is sent on the channel. 
-func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Out <- envelope: - case <-timer.C: - require.Fail(t, "timed out sending message", "%v on channel %v", envelope, channel.ID) +func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { + tctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + err := channel.Send(tctx, envelope) + switch { + case errors.Is(err, context.DeadlineExceeded): + require.Fail(t, "timed out sending message to %q", envelope.To) + default: + require.NoError(t, err, "unexpected error") } } // RequireSendReceive requires that a given Protobuf message is sent to the // given peer, and then that the given response is received back. func RequireSendReceive( + ctx context.Context, t *testing.T, channel *p2p.Channel, peerID types.NodeID, send proto.Message, receive proto.Message, ) { - RequireSend(t, channel, p2p.Envelope{To: peerID, Message: send}) + RequireSend(ctx, t, channel, p2p.Envelope{To: peerID, Message: send}) RequireReceive(t, channel, p2p.Envelope{From: peerID, Message: send}) } diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 24aeec05f..f5eb2ab7f 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -165,12 +165,12 @@ func (r *Reactor) processPexCh(ctx context.Context) { // outbound requests for new peers case <-timer.C: - r.sendRequestForPeers() + r.sendRequestForPeers(ctx) // inbound requests for new peers or responses to requests sent by this // reactor case envelope := <-r.pexCh.In: - if err := r.handleMessage(r.pexCh.ID, envelope); err != nil { + if err := r.handleMessage(ctx, r.pexCh.ID, envelope); err != nil { r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) if serr := r.pexCh.SendError(ctx, p2p.PeerError{ NodeID: 
envelope.From, @@ -199,7 +199,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { +func (r *Reactor) handlePexMessage(ctx context.Context, envelope p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -219,9 +219,11 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { URL: addr.String(), } } - r.pexCh.Out <- p2p.Envelope{ + if err := r.pexCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &protop2p.PexResponse{Addresses: pexAddresses}, + }); err != nil { + return err } case *protop2p.PexResponse: @@ -264,7 +266,7 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -280,7 +282,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err switch chID { case p2p.ChannelID(PexChannel): - err = r.handlePexMessage(envelope) + err = r.handlePexMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -312,7 +314,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // peer a request for more peer addresses. 
The function then moves the // peer into the requestsSent bucket and calculates when the next request // time should be -func (r *Reactor) sendRequestForPeers() { +func (r *Reactor) sendRequestForPeers(ctx context.Context) { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { @@ -330,9 +332,11 @@ func (r *Reactor) sendRequestForPeers() { } // send out the pex request - r.pexCh.Out <- p2p.Envelope{ + if err := r.pexCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &protop2p.PexRequest{}, + }); err != nil { + return } // remove the peer from the abvailable peers list and mark it in the requestsSent map diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 28da5c72c..3f0adcf89 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -45,7 +45,7 @@ func TestReactorBasic(t *testing.T) { // assert that when a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode) + testNet.sendRequest(ctx, t, firstNode, secondNode) testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } @@ -112,8 +112,8 @@ func TestReactorSendsResponseWithoutRequest(t *testing.T) { // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two responses to counter that. 
- testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) @@ -139,7 +139,7 @@ func TestReactorNeverSendsTooManyPeers(t *testing.T) { // first we check that even although we have 110 peers, honest pex reactors // only send 100 (test if secondNode sends firstNode 100 addresses) - testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100) + testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100) } func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { @@ -475,11 +475,13 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, } func (r *reactorTestSuite) pingAndlistenForNAddresses( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, addresses int, ) { + t.Helper() r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { @@ -499,10 +501,10 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode) + r.sendRequest(ctx, t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode) + r.sendRequest(ctx, t, toNode, fromNode) r.listenFor(t, to, conditional, assertion, waitPeriod) } @@ -566,27 +568,30 @@ func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { return addresses } -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) { +func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, 
toNode int) { + t.Helper() to, from := r.checkNodePair(t, toNode, fromNode) - r.pexChannels[from].Out <- p2p.Envelope{ + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ To: to, Message: &p2pproto.PexRequest{}, - } + })) } func (r *reactorTestSuite) sendResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, withNodes []int, ) { + t.Helper() from, to := r.checkNodePair(t, fromNode, toNode) addrs := r.getAddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ To: to, Message: &p2pproto.PexResponse{ Addresses: addrs, }, - } + })) } func (r *reactorTestSuite) requireNumberOfPeers( diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 2974c1e88..a6d5fdc03 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -32,11 +32,12 @@ func echoReactor(ctx context.Context, channel *p2p.Channel) { select { case envelope := <-channel.In: value := envelope.Message.(*p2ptest.Message).Value - channel.Out <- p2p.Envelope{ + if err := channel.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &p2ptest.Message{Value: value}, + }); err != nil { + return } - case <-ctx.Done(): return } @@ -64,14 +65,14 @@ func TestRouter_Network(t *testing.T) { // Sending a message to each peer should work. for _, peer := range peers { - p2ptest.RequireSendReceive(t, channel, peer.NodeID, + p2ptest.RequireSendReceive(ctx, t, channel, peer.NodeID, &p2ptest.Message{Value: "foo"}, &p2ptest.Message{Value: "foo"}, ) } // Sending a broadcast should return back a message from all peers. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ Broadcast: true, Message: &p2ptest.Message{Value: "bar"}, }) @@ -151,13 +152,13 @@ func TestRouter_Channel_Basic(t *testing.T) { require.NoError(t, err) // We should be able to send on the channel, even though there are no peers. 
- p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "foo"}, }) // A message to ourselves should be dropped. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: selfID, Message: &p2ptest.Message{Value: "self"}, }) @@ -184,40 +185,40 @@ func TestRouter_Channel_SendReceive(t *testing.T) { // Sending a message a->b should work, and not send anything // further to a, b, or c. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireEmpty(t, a, b, c) // Sending a nil message a->b should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: nil}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: nil}) p2ptest.RequireEmpty(t, a, b, c) // Sending a different message type should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) p2ptest.RequireEmpty(t, a, b, c) // Sending to an unknown peer should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "a"}, }) p2ptest.RequireEmpty(t, a, b, c) // Sending without a recipient should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) p2ptest.RequireEmpty(t, a, b, c) // Sending to self should be dropped. 
- p2ptest.RequireSend(t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) p2ptest.RequireEmpty(t, a, b, c) // Removing b and sending to it should be dropped. network.Remove(ctx, t, bID) - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) p2ptest.RequireEmpty(t, a, b, c) // After all this, sending a message c->a should work. - p2ptest.RequireSend(t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireSend(ctx, t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) p2ptest.RequireReceive(t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) p2ptest.RequireEmpty(t, a, b, c) @@ -244,7 +245,7 @@ func TestRouter_Channel_Broadcast(t *testing.T) { network.Start(ctx, t) // Sending a broadcast from b should work. - p2ptest.RequireSend(t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireSend(ctx, t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireReceive(t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireReceive(t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireReceive(t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) @@ -252,7 +253,7 @@ func TestRouter_Channel_Broadcast(t *testing.T) { // Removing one node from the network shouldn't prevent broadcasts from working. 
network.Remove(ctx, t, dID) - p2ptest.RequireSend(t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) p2ptest.RequireReceive(t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) p2ptest.RequireEmpty(t, a, b, c, d) @@ -285,16 +286,16 @@ func TestRouter_Channel_Wrapper(t *testing.T) { // Since wrapperMessage implements p2p.Wrapper and handles Message, it // should automatically wrap and unwrap sent messages -- we prepend the // wrapper actions to the message value to signal this. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) // If we send a different message that can't be wrapped, it should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) p2ptest.RequireEmpty(t, b) // If we send the wrapper message itself, it should also be passed through // since WrapperMessage supports it, and should only be unwrapped at the receiver. 
- p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: bID, Message: &wrapperMessage{Message: p2ptest.Message{Value: "foo"}}, }) @@ -960,10 +961,10 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { channel, err := router.OpenChannel(ctx, chDesc) require.NoError(t, err) - channel.Out <- p2p.Envelope{ + require.NoError(t, channel.Send(ctx, p2p.Envelope{ To: peer.NodeID, Message: &p2ptest.Message{Value: "Hi"}, - } + })) require.NoError(t, router.Stop()) mockTransport.AssertExpectations(t) diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 8620e6285..2e476c25d 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -26,16 +26,16 @@ var ( // NOTE: It is not the responsibility of the dispatcher to verify the light blocks. type Dispatcher struct { // the channel with which to send light block requests on - requestCh chan<- p2p.Envelope + requestCh *p2p.Channel mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer calls map[types.NodeID]chan *types.LightBlock } -func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { +func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher { return &Dispatcher{ - requestCh: requestCh, + requestCh: requestChannel, calls: make(map[types.NodeID]chan *types.LightBlock), } } @@ -91,11 +91,14 @@ func (d *Dispatcher) dispatch(ctx context.Context, peer types.NodeID, height int d.calls[peer] = ch // send request - d.requestCh <- p2p.Envelope{ + if err := d.requestCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.LightBlockRequest{ Height: uint64(height), }, + }); err != nil { + close(ch) + return ch, err } return ch, nil diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index e717dad12..7441327a8 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -18,16 +18,32 @@ import ( 
"github.com/tendermint/tendermint/types" ) +type channelInternal struct { + In chan p2p.Envelope + Out chan p2p.Envelope + Error chan p2p.PeerError +} + +func testChannel(size int) (*channelInternal, *p2p.Channel) { + in := &channelInternal{ + In: make(chan p2p.Envelope, size), + Out: make(chan p2p.Envelope, size), + Error: make(chan p2p.PeerError, size), + } + return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error) +} + func TestDispatcherBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) const numPeers = 5 - ch := make(chan p2p.Envelope, 100) - closeCh := make(chan struct{}) - defer close(closeCh) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(numPeers) wg := sync.WaitGroup{} @@ -52,19 +68,24 @@ func TestDispatcherBasic(t *testing.T) { func TestDispatcherReturnsNoBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) + d := NewDispatcher(ch) - doneCh := make(chan struct{}) + peer := factory.NodeID("a") go func() { - <-ch + <-chans.Out require.NoError(t, d.Respond(nil, peer)) - close(doneCh) + cancel() }() - lb, err := d.LightBlock(context.Background(), 1, peer) - <-doneCh + lb, err := d.LightBlock(ctx, 1, peer) + <-ctx.Done() require.Nil(t, lb) require.Nil(t, err) @@ -72,11 +93,15 @@ func TestDispatcherReturnsNoBlock(t *testing.T) { func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, ch := testChannel(100) d := NewDispatcher(ch) peer := factory.NodeID("a") - ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancelFunc := 
context.WithTimeout(ctx, 10*time.Millisecond) defer cancelFunc() lb, err := d.LightBlock(ctx, 1, peer) @@ -89,13 +114,15 @@ func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { func TestDispatcherProviders(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) chainID := "test-chain" - closeCh := make(chan struct{}) - defer close(closeCh) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(5) providers := make([]*BlockProvider, len(peers)) @@ -270,7 +297,7 @@ func TestPeerListRemove(t *testing.T) { // handleRequests is a helper function usually run in a separate go routine to // imitate the expected responses of the reactor wired to the dispatcher -func handleRequests(t *testing.T, d *Dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) { +func handleRequests(ctx context.Context, t *testing.T, d *Dispatcher, ch chan p2p.Envelope) { t.Helper() for { select { @@ -280,7 +307,7 @@ func handleRequests(t *testing.T, d *Dispatcher, ch chan p2p.Envelope, closeCh c resp := mockLBResp(t, peer, int64(height), time.Now()) block, _ := resp.block.ToProto() require.NoError(t, d.Respond(block, resp.peer)) - case <-closeCh: + case <-ctx.Done(): return } } diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 61e3dec08..09716fb23 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -195,7 +195,7 @@ func NewReactor( stateStore: stateStore, blockStore: blockStore, peers: newPeerList(), - dispatcher: NewDispatcher(blockCh.Out), + dispatcher: NewDispatcher(blockCh), providers: make(map[types.NodeID]*BlockProvider), metrics: ssMetrics, } @@ -256,8 +256,8 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.conn, r.connQuery, r.stateProvider, - r.snapshotCh.Out, - r.chunkCh.Out, + r.snapshotCh, + 
r.chunkCh, r.tempDir, r.metrics, ) @@ -270,17 +270,12 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.mtx.Unlock() }() - requestSnapshotsHook := func() { + requestSnapshotsHook := func() error { // request snapshots from all currently connected peers - msg := p2p.Envelope{ + return r.snapshotCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-ctx.Done(): - case r.snapshotCh.Out <- msg: - } + }) } state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) @@ -508,7 +503,7 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -526,7 +521,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { "format", snapshot.Format, "peer", envelope.From, ) - r.snapshotCh.Out <- p2p.Envelope{ + + if err := r.snapshotCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, @@ -535,6 +531,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, + }); err != nil { + return err } } @@ -577,7 +575,7 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. // It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. 
-func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: r.logger.Debug( @@ -611,7 +609,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { "chunk", msg.Index, "peer", envelope.From, ) - r.chunkCh.Out <- p2p.Envelope{ + if err := r.chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ Height: msg.Height, @@ -620,6 +618,8 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { Chunk: resp.Chunk, Missing: resp.Chunk == nil, }, + }); err != nil { + return err } case *ssproto.ChunkResponse: @@ -664,7 +664,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: r.logger.Info("received light block request", "height", msg.Height) @@ -674,11 +674,13 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { return err } if lb == nil { - r.blockCh.Out <- p2p.Envelope{ + if err := r.blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: nil, }, + }); err != nil { + return err } return nil } @@ -691,13 +693,14 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { // NOTE: If we don't have the light block we will send a nil light block // back to the requested node, indicating that we don't have it. 
- r.blockCh.Out <- p2p.Envelope{ + if err := r.blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: lbproto, }, + }); err != nil { + return err } - case *ssproto.LightBlockResponse: var height int64 if msg.LightBlock != nil { @@ -715,7 +718,7 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: r.logger.Debug("received consensus params request", "height", msg.Height) @@ -726,14 +729,15 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { } cpproto := cp.ToProto() - r.paramsCh.Out <- p2p.Envelope{ + if err := r.paramsCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ParamsResponse{ Height: msg.Height, ConsensusParams: cpproto, }, + }); err != nil { + return err } - case *ssproto.ParamsResponse: r.mtx.RLock() defer r.mtx.RUnlock() @@ -761,7 +765,7 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -777,17 +781,13 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err switch chID { case SnapshotChannel: - err = r.handleSnapshotMessage(envelope) - + err = r.handleSnapshotMessage(ctx, envelope) case ChunkChannel: - err = r.handleChunkMessage(envelope) - + err = r.handleChunkMessage(ctx, envelope) case LightBlockChannel: - err = r.handleLightBlockMessage(envelope) - + err = r.handleLightBlockMessage(ctx, envelope) case ParamsChannel: - err = r.handleParamsMessage(envelope) - + err = r.handleParamsMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } @@ -806,7 +806,7 @@ func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) r.logger.Debug("channel closed", "channel", chName) return case envelope := <-ch.In: - if err := r.handleMessage(ch.ID, envelope); err != nil { + if err := r.handleMessage(ctx, ch.ID, envelope); err != nil { r.logger.Error("failed to process message", "err", err, "channel", chName, @@ -999,7 +999,7 @@ func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initial providers[idx] = NewBlockProvider(p, chainID, r.dispatcher) } - r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, providers, to, r.paramsCh.Out, spLogger) + r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, providers, to, r.paramsCh, spLogger) if err != nil { return fmt.Errorf("failed to initialize P2P state provider: %w", err) } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index b1863f17b..e6273aca3 100644 --- a/internal/statesync/reactor_test.go +++ 
b/internal/statesync/reactor_test.go @@ -170,8 +170,8 @@ func setup( conn, connQuery, stateProvider, - rts.snapshotOutCh, - rts.chunkOutCh, + rts.snapshotChannel, + rts.chunkChannel, "", rts.reactor.metrics, ) diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b622824cd..4f398ce77 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -200,7 +200,7 @@ type stateProviderP2P struct { tmsync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 - paramsSendCh chan<- p2p.Envelope + paramsSendCh *p2p.Channel paramsRecvCh chan types.ConsensusParams } @@ -212,7 +212,7 @@ func NewP2PStateProvider( initialHeight int64, providers []lightprovider.Provider, trustOptions light.TrustOptions, - paramsSendCh chan<- p2p.Envelope, + paramsSendCh *p2p.Channel, logger log.Logger, ) (StateProvider, error) { if len(providers) < 2 { @@ -382,15 +382,13 @@ func (s *stateProviderP2P) tryGetConsensusParamsFromWitnesses( return nil, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err) } - select { - case s.paramsSendCh <- p2p.Envelope{ + if err := s.paramsSendCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.ParamsRequest{ Height: uint64(height), }, - }: - case <-ctx.Done(): - return nil, ctx.Err() + }); err != nil { + return nil, err } select { diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index a0f79494a..b5ea158a4 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -57,8 +57,8 @@ type syncer struct { conn proxy.AppConnSnapshot connQuery proxy.AppConnQuery snapshots *snapshotPool - snapshotCh chan<- p2p.Envelope - chunkCh chan<- p2p.Envelope + snapshotCh *p2p.Channel + chunkCh *p2p.Channel tempDir string fetchers int32 retryTimeout time.Duration @@ -79,8 +79,8 @@ func newSyncer( conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, stateProvider StateProvider, - snapshotCh chan<- p2p.Envelope, - chunkCh 
chan<- p2p.Envelope, + snapshotCh *p2p.Channel, + chunkCh *p2p.Channel, tempDir string, metrics *Metrics, ) *syncer { @@ -138,29 +138,13 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(ctx context.Context, peerID types.NodeID) (err error) { - defer func() { - // TODO: remove panic recover once AddPeer can no longer accientally send on - // closed channel. - // This recover was added to protect against the p2p message being sent - // to the snapshot channel after the snapshot channel was closed. - if r := recover(); r != nil { - err = fmt.Errorf("panic sending peer snapshot request: %v", r) - } - }() - +func (s *syncer) AddPeer(ctx context.Context, peerID types.NodeID) error { s.logger.Debug("Requesting snapshots from peer", "peer", peerID) - msg := p2p.Envelope{ + return s.snapshotCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-ctx.Done(): - case s.snapshotCh <- msg: - } - return err + }) } // RemovePeer removes a peer from the pool. 
@@ -175,14 +159,16 @@ func (s *syncer) RemovePeer(peerID types.NodeID) { func (s *syncer) SyncAny( ctx context.Context, discoveryTime time.Duration, - requestSnapshots func(), + requestSnapshots func() error, ) (sm.State, *types.Commit, error) { if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { discoveryTime = minimumDiscoveryTime } if discoveryTime > 0 { - requestSnapshots() + if err := requestSnapshots(); err != nil { + return sm.State{}, nil, err + } s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) time.Sleep(discoveryTime) } @@ -506,7 +492,9 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch ticker := time.NewTicker(s.retryTimeout) defer ticker.Stop() - s.requestChunk(ctx, snapshot, index) + if err := s.requestChunk(ctx, snapshot, index); err != nil { + return + } select { case <-chunks.WaitFor(index): @@ -524,12 +512,16 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch } // requestChunk requests a chunk from a peer. -func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) { +// +// returns nil if there are no peers for the given snapshot or the +// request is successfully made and an error if the request cannot be +// completed +func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) error { peer := s.snapshots.GetPeer(snapshot) if peer == "" { s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) - return + return nil } s.logger.Debug( @@ -549,10 +541,10 @@ func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uin }, } - select { - case s.chunkCh <- msg: - case <-ctx.Done(): + if err := s.chunkCh.Send(ctx, msg); err != nil { + return err } + return nil } // verifyApp verifies the sync, checking the app hash and last block height. 
It returns the diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 816e6301a..bd4640fe0 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -184,7 +184,7 @@ func TestSyncer_SyncAny(t *testing.T) { LastBlockAppHash: []byte("app_hash"), }, nil) - newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() {}) + newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.NoError(t, err) wg.Wait() @@ -223,7 +223,7 @@ func TestSyncer_SyncAny_noSnapshots(t *testing.T) { rts := setup(ctx, t, nil, nil, stateProvider, 2) - _, _, err := rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) } @@ -246,7 +246,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) } @@ -288,7 +288,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) { Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -326,7 +326,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) 
} @@ -375,7 +375,7 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -401,7 +401,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.True(t, errors.Is(err, errBoom)) rts.conn.AssertExpectations(t) } diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go index f508e873d..41633cbef 100644 --- a/libs/events/event_cache.go +++ b/libs/events/event_cache.go @@ -1,5 +1,7 @@ package events +import "context" + // An EventCache buffers events for a Fireable // All events are cached. Filtering happens on Flush type EventCache struct { @@ -28,9 +30,9 @@ func (evc *EventCache) FireEvent(event string, data EventData) { // Fire events by running evsw.FireEvent on all cached events. Blocks. 
// Clears cached events -func (evc *EventCache) Flush() { +func (evc *EventCache) Flush(ctx context.Context) { for _, ei := range evc.events { - evc.evsw.FireEvent(ei.event, ei.data) + evc.evsw.FireEvent(ctx, ei.event, ei.data) } // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation evc.events = nil diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index a5bb975c9..13ab341f6 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -16,23 +16,25 @@ func TestEventCache_Flush(t *testing.T) { err := evsw.Start(ctx) require.NoError(t, err) - err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + err = evsw.AddListenerForEvent("nothingness", "", func(_ context.Context, data EventData) error { // Check we are not initializing an empty buffer full of zeroed eventInfos in the EventCache require.FailNow(t, "We should never receive a message on this switch since none are fired") + return nil }) require.NoError(t, err) evc := NewEventCache(evsw) - evc.Flush() + evc.Flush(ctx) // Check after reset - evc.Flush() + evc.Flush(ctx) fail := true pass := false - err = evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + err = evsw.AddListenerForEvent("somethingness", "something", func(_ context.Context, data EventData) error { if fail { require.FailNow(t, "Shouldn't see a message until flushed") } pass = true + return nil }) require.NoError(t, err) @@ -40,6 +42,6 @@ func TestEventCache_Flush(t *testing.T) { evc.FireEvent("something", struct{ int }{2}) evc.FireEvent("something", struct{ int }{3}) fail = false - evc.Flush() + evc.Flush(ctx) assert.True(t, pass) } diff --git a/libs/events/events.go b/libs/events/events.go index f6151e734..29ebd672f 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -33,7 +33,7 @@ type Eventable interface { // // FireEvent fires an event with the given name and data. 
type Fireable interface { - FireEvent(eventValue string, data EventData) + FireEvent(ctx context.Context, eventValue string, data EventData) } // EventSwitch is the interface for synchronous pubsub, where listeners @@ -148,7 +148,7 @@ func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) } } -func (evsw *eventSwitch) FireEvent(event string, data EventData) { +func (evsw *eventSwitch) FireEvent(ctx context.Context, event string, data EventData) { // Get the eventCell evsw.mtx.RLock() eventCell := evsw.eventCells[event] @@ -159,7 +159,7 @@ func (evsw *eventSwitch) FireEvent(event string, data EventData) { } // Fire event for all listeners in eventCell - eventCell.FireEvent(data) + eventCell.FireEvent(ctx, data) } //----------------------------------------------------------------------------- @@ -190,7 +190,7 @@ func (cell *eventCell) RemoveListener(listenerID string) int { return numListeners } -func (cell *eventCell) FireEvent(data EventData) { +func (cell *eventCell) FireEvent(ctx context.Context, data EventData) { cell.mtx.RLock() eventCallbacks := make([]EventCallback, 0, len(cell.listeners)) for _, cb := range cell.listeners { @@ -199,13 +199,16 @@ func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RUnlock() for _, cb := range eventCallbacks { - cb(data) + if err := cb(ctx, data); err != nil { + // should we log or abort here? 
+ continue + } } } //----------------------------------------------------------------------------- -type EventCallback func(data EventData) +type EventCallback func(ctx context.Context, data EventData) error type eventListener struct { id string diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 0e8667908..db9385ec3 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -24,12 +24,17 @@ func TestAddListenerForEventFireOnce(t *testing.T) { messages := make(chan EventData) require.NoError(t, evsw.AddListenerForEvent("listener", "event", - func(data EventData) { + func(ctx context.Context, data EventData) error { // test there's no deadlock if we remove the listener inside a callback evsw.RemoveListener("listener") - messages <- data + select { + case messages <- data: + return nil + case <-ctx.Done(): + return ctx.Err() + } })) - go evsw.FireEvent("event", "data") + go evsw.FireEvent(ctx, "event", "data") received := <-messages if received != "data" { t.Errorf("message received does not match: %v", received) @@ -51,13 +56,18 @@ func TestAddListenerForEventFireMany(t *testing.T) { numbers := make(chan uint64, 4) // subscribe one listener for one event require.NoError(t, evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - numbers <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event", doneSending, uint64(1)) + go fireEvents(ctx, evsw, "event", doneSending, uint64(1)) checkSum := <-doneSending close(numbers) eventSum := <-doneSum @@ -84,23 +94,38 @@ func TestAddListenerForDifferentEvents(t *testing.T) { numbers := make(chan uint64, 4) // subscribe one listener to three events require.NoError(t, evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers 
<- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener", "event3", - func(data EventData) { - numbers <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1)) - go fireEvents(evsw, "event3", doneSending3, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(1)) var checkSum uint64 checkSum += <-doneSending1 checkSum += <-doneSending2 @@ -134,33 +159,58 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers2 := make(chan uint64, 4) // subscribe two listener to three events require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, 
evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 go sumReceivedNumbers(numbers2, doneSum2) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -209,9 +259,10 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // we explicitly ignore errors here, since the listener will sometimes be removed // (that's what we're testing) _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), - func(data EventData) { + func(ctx context.Context, data EventData) error { t.Errorf("should not run callback for %d.\n", index) stopInputEvent = true + return nil }) } }() @@ -222,7 +273,7 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { 
evsw.RemoveListener("listener") // remove the last listener for i := 0; i < roundCount && !stopInputEvent; i++ { - evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) + evsw.FireEvent(ctx, fmt.Sprintf("event%d", i), uint64(1001)) } } @@ -245,23 +296,33 @@ func TestAddAndRemoveListener(t *testing.T) { numbers2 := make(chan uint64, 4) // subscribe two listener to three events require.NoError(t, evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 go sumReceivedNumbers(numbers2, doneSum2) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) checkSumEvent1 := <-doneSending1 // after sending all event1, unsubscribe for all events evsw.RemoveListener("listener") - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) checkSumEvent2 := <-doneSending2 close(numbers1) close(numbers2) @@ -287,17 +348,19 @@ func TestRemoveListener(t *testing.T) { sum1, sum2 := 0, 0 // add some listeners and make sure they work require.NoError(t, evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { + func(ctx context.Context, data EventData) error { sum1++ + return nil })) require.NoError(t, evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { + func(ctx context.Context, data EventData) error { sum2++ + return nil 
})) for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count, sum1) assert.Equal(t, count, sum2) @@ -305,8 +368,8 @@ func TestRemoveListener(t *testing.T) { // remove one by event and make sure it is gone evsw.RemoveListenerForEvent("event2", "listener") for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count*2, sum1) assert.Equal(t, count, sum2) @@ -314,8 +377,8 @@ func TestRemoveListener(t *testing.T) { // remove the listener entirely and make sure both gone evsw.RemoveListener("listener") for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count*2, sum1) assert.Equal(t, count, sum2) @@ -347,28 +410,58 @@ func TestRemoveListenersAsync(t *testing.T) { numbers2 := make(chan uint64, 4) // subscribe two listener to three events require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) 
require.NoError(t, evsw.AddListenerForEvent("listener2", "event1", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) @@ -382,7 +475,7 @@ func TestRemoveListenersAsync(t *testing.T) { eventNumber := r1.Intn(3) + 1 go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), //nolint:errcheck // ignore for tests fmt.Sprintf("event%v", eventNumber), - func(_ EventData) {}) + func(context.Context, EventData) error { return nil }) } } removeListenersStress := func() { @@ -395,10 +488,10 @@ func TestRemoveListenersAsync(t *testing.T) { } addListenersStress() // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) removeListenersStress() - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -437,13 +530,21 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // to `offset` + 999. 
It additionally returns the addition of all integers // sent on `doneChan` for assertion that all events have been sent, and enabling // the test to assert all events have also been received. -func fireEvents(evsw Fireable, event string, doneChan chan uint64, - offset uint64) { +func fireEvents(ctx context.Context, evsw Fireable, event string, doneChan chan uint64, offset uint64) { + defer close(doneChan) + var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { + if ctx.Err() != nil { + break + } + + evsw.FireEvent(ctx, event, i) sentSum += i - evsw.FireEvent(event, i) } - doneChan <- sentSum - close(doneChan) + + select { + case <-ctx.Done(): + case doneChan <- sentSum: + } } From 3e92899bd917a624fa0d3aca5c1fe7109fe5adab Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Fri, 10 Dec 2021 09:36:43 -0800 Subject: [PATCH 16/33] internal/libs/protoio: optimize MarshalDelimited by plain byteslice allocations+sync.Pool (#7325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Noticed in profiles that invoking *VoteSignBytes always created a bytes.Buffer, then discarded it inside protoio.MarshalDelimited. I dug further and examined the call paths and noticed that we unconditionally create the bytes.Buffer, even though we might have proto messages (in the common case) that implement MarshalTo([]byte), and invoked varintWriter. Instead by inlining this case, we skip a bunch of allocations and CPU cycles, which then reflects properly on all calling functions. 
Here are the benchmark results: ```shell $ benchstat before.txt after.txt name old time/op new time/op delta types.VoteSignBytes-8 705ns ± 3% 573ns ± 6% -18.74% (p=0.000 n=18+20) types.CommitVoteSignBytes-8 8.15µs ± 9% 6.81µs ± 4% -16.51% (p=0.000 n=20+19) protoio.MarshalDelimitedWithMarshalTo-8 788ns ± 8% 772ns ± 3% -2.01% (p=0.050 n=20+20) protoio.MarshalDelimitedNoMarshalTo-8 989ns ± 4% 845ns ± 2% -14.51% (p=0.000 n=20+18) name old alloc/op new alloc/op delta types.VoteSignBytes-8 792B ± 0% 600B ± 0% -24.24% (p=0.000 n=20+20) types.CommitVoteSignBytes-8 9.52kB ± 0% 7.60kB ± 0% -20.17% (p=0.000 n=20+20) protoio.MarshalDelimitedNoMarshalTo-8 808B ± 0% 440B ± 0% -45.54% (p=0.000 n=20+20) name old allocs/op new allocs/op delta types.VoteSignBytes-8 13.0 ± 0% 10.0 ± 0% -23.08% (p=0.000 n=20+20) types.CommitVoteSignBytes-8 140 ± 0% 110 ± 0% -21.43% (p=0.000 n=20+20) protoio.MarshalDelimitedNoMarshalTo-8 10.0 ± 0% 7.0 ± 0% -30.00% (p=0.000 n=20+20) ``` Thanks to Tharsis who tasked me to help them increase TPS and who are keen on improving Tendermint and efficiency. --- CHANGELOG_PENDING.md | 1 + internal/libs/protoio/writer.go | 42 +++++++++++-- internal/libs/protoio/writer_test.go | 91 ++++++++++++++++++++++++++++ types/vote_test.go | 49 +++++++++++++++ 4 files changed, 179 insertions(+), 4 deletions(-) create mode 100644 internal/libs/protoio/writer_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9f39edaa6..70b307961 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -44,6 +44,7 @@ Special thanks to external contributors on this release: - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) ### IMPROVEMENTS +- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. 
(@odeke-em) - [pubsub] \#7319 Performance improvements for the event query API (@creachadair) diff --git a/internal/libs/protoio/writer.go b/internal/libs/protoio/writer.go index d4c66798f..93be1f851 100644 --- a/internal/libs/protoio/writer.go +++ b/internal/libs/protoio/writer.go @@ -34,6 +34,7 @@ import ( "bytes" "encoding/binary" "io" + "sync" "github.com/gogo/protobuf/proto" ) @@ -90,11 +91,44 @@ func (w *varintWriter) Close() error { return nil } -func MarshalDelimited(msg proto.Message) ([]byte, error) { - var buf bytes.Buffer - _, err := NewDelimitedWriter(&buf).WriteMsg(msg) +func varintWrittenBytes(m marshaler, size int) ([]byte, error) { + buf := make([]byte, size+binary.MaxVarintLen64) + n := binary.PutUvarint(buf, uint64(size)) + nw, err := m.MarshalTo(buf[n:]) if err != nil { return nil, err } - return buf.Bytes(), nil + return buf[:n+nw], nil +} + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func MarshalDelimited(msg proto.Message) ([]byte, error) { + // The goal here is to write proto message as is knowning already if + // the exact size can be retrieved and if so just use that. + if m, ok := msg.(marshaler); ok { + size, ok := getSize(msg) + if ok { + return varintWrittenBytes(m, size) + } + } + + // Otherwise, go down the route of using proto.Marshal, + // and use the buffer pool to retrieve a writer. + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, err := NewDelimitedWriter(buf).WriteMsg(msg) + if err != nil { + return nil, err + } + // Given that we are reusing buffers, we should + // make a copy of the returned bytes. 
+ bytesCopy := make([]byte, buf.Len()) + copy(bytesCopy, buf.Bytes()) + return bytesCopy, nil } diff --git a/internal/libs/protoio/writer_test.go b/internal/libs/protoio/writer_test.go new file mode 100644 index 000000000..a4c0b6552 --- /dev/null +++ b/internal/libs/protoio/writer_test.go @@ -0,0 +1,91 @@ +package protoio_test + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/libs/protoio" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +func aVote() *types.Vote { + var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") + if err != nil { + panic(err) + } + + return &types.Vote{ + Type: tmproto.SignedMsgType(byte(tmproto.PrevoteType)), + Height: 12345, + Round: 2, + Timestamp: stamp, + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + ValidatorIndex: 56789, + } +} + +type excludedMarshalTo struct { + msg proto.Message +} + +func (emt *excludedMarshalTo) ProtoMessage() {} +func (emt *excludedMarshalTo) String() string { + return emt.msg.String() +} +func (emt *excludedMarshalTo) Reset() { + emt.msg.Reset() +} +func (emt *excludedMarshalTo) Marshal() ([]byte, error) { + return proto.Marshal(emt.msg) +} + +var _ proto.Message = (*excludedMarshalTo)(nil) + +var sink interface{} + +func BenchmarkMarshalDelimitedWithMarshalTo(b *testing.B) { + msgs := []proto.Message{ + aVote().ToProto(), + } + benchmarkMarshalDelimited(b, msgs) +} + +func BenchmarkMarshalDelimitedNoMarshalTo(b *testing.B) { + msgs := []proto.Message{ + &excludedMarshalTo{aVote().ToProto()}, + } + 
benchmarkMarshalDelimited(b, msgs) +} + +func benchmarkMarshalDelimited(b *testing.B, msgs []proto.Message) { + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, msg := range msgs { + blob, err := protoio.MarshalDelimited(msg) + require.Nil(b, err) + sink = blob + } + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} diff --git a/types/vote_test.go b/types/vote_test.go index 3ffb60324..29c29baac 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -294,3 +294,52 @@ func TestVoteProtobuf(t *testing.T) { } } } + +var sink interface{} + +var protoVote *tmproto.Vote +var sampleCommit *Commit + +func init() { + protoVote = examplePrecommit().ToProto() + + lastID := makeBlockIDRandom() + voteSet, _, vals := randVoteSet(2, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(lastID, 2, 1, voteSet, vals, time.Now()) + if err != nil { + panic(err) + } + sampleCommit = commit +} + +func BenchmarkVoteSignBytes(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + sink = VoteSignBytes("test_chain_id", protoVote) + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} + +func BenchmarkCommitVoteSignBytes(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for index := range sampleCommit.Signatures { + sink = sampleCommit.VoteSignBytes("test_chain_id", int32(index)) + } + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} From a925f4fa84fe3f423538aa5ff986d41ab91ebf6b Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 10 Dec 2021 10:03:42 -0800 Subject: [PATCH 17/33] Fix a panic in the indexer service test. (#7424) The test service was starting up without a logger and crashing while trying to log. 
--- CHANGELOG_PENDING.md | 2 +- internal/state/indexer/indexer_service_test.go | 14 +++++--------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 70b307961..503931dd8 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -32,7 +32,7 @@ Special thanks to external contributors on this release: - [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. (@tychoish) - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish) - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish) - - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychosih) + - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish) - Blockchain Protocol diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d24744aa9..79ac00b35 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -39,14 +39,6 @@ var ( dbName = "postgres" ) -// NewIndexerService returns a new service instance. 
-func NewIndexerService(es []indexer.EventSink, eventBus *eventbus.EventBus) *indexer.Service { - return indexer.NewService(indexer.ServiceArgs{ - Sinks: es, - EventBus: eventBus, - }) -} - func TestIndexerServiceIndexesBlocks(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -70,7 +62,11 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) - service := NewIndexerService(eventSinks, eventBus) + service := indexer.NewService(indexer.ServiceArgs{ + Logger: logger, + Sinks: eventSinks, + EventBus: eventBus, + }) require.NoError(t, service.Start(ctx)) t.Cleanup(service.Wait) From f8bf2cb912d666d2eccc47e9572ae6aa5e4cf936 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Dec 2021 10:30:04 -0800 Subject: [PATCH 18/33] build(deps): Bump github.com/adlio/schema from 1.1.15 to 1.2.2 (#7423) * build(deps): Bump github.com/adlio/schema from 1.1.15 to 1.2.2 Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.15 to 1.2.2. - [Release notes](https://github.com/adlio/schema/releases) - [Commits](https://github.com/adlio/schema/compare/v1.1.15...v1.2.2) --- updated-dependencies: - dependency-name: github.com/adlio/schema dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Work around API changes in the migrator package. A recent update inadvertently broke the API by changing the receiver types of the methods without updating the constructor. See: https://github.com/adlio/schema/issues/13 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: M. J. 
Fromberger --- go.mod | 4 +-- go.sum | 30 +++++++++++-------- .../state/indexer/indexer_service_test.go | 3 +- internal/state/indexer/sink/psql/psql_test.go | 4 ++- 4 files changed, 25 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 01b2930c8..671ed3798 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/BurntSushi/toml v0.4.1 - github.com/adlio/schema v1.1.15 + github.com/adlio/schema v1.2.2 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect @@ -37,7 +37,7 @@ require ( github.com/tendermint/tm-db v0.6.6 github.com/vektra/mockery/v2 v2.9.4 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 - golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b + golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.42.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect diff --git a/go.sum b/go.sum index 5aaff4e4b..32de9ee79 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -75,8 +77,8 @@ 
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -88,8 +90,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.1.15 h1:ap+yp+RFcfDs1Eq1D89LX4KR/UDqxjRnLRGuwsxGyOo= -github.com/adlio/schema v1.1.15/go.mod h1:ThQUeMpGSGpfzeElY/f3wW1S7jIgnYAQ+5vON7w1T4o= +github.com/adlio/schema v1.2.2 h1:moF5Ncfn7tz9ft07kJmtGoxU4DRb8hvGjCtzgqQMdoI= +github.com/adlio/schema v1.2.2/go.mod h1:nD7ZWmMMbwU12Pqwg+qL0rTvHBrBXfNz+5UQxTfy38M= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo 
v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -188,8 +190,8 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= -github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= +github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= +github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -303,6 +305,8 @@ github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEK github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0 
h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= @@ -660,6 +664,8 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= +github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -759,8 +765,8 @@ github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
@@ -1170,8 +1176,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b h1:MWaHNqZy3KTpuTMAGvv+Kw+ylsEpmyJZizz1dqxnu28= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1291,9 +1297,9 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d 
h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index 79ac00b35..00a8b3fd4 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -180,7 +180,8 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { sm, err := readSchema() assert.Nil(t, err) - err = schema.NewMigrator().Apply(psqldb, sm) + migrator := schema.NewMigrator() + err = migrator.Apply(psqldb, sm) assert.Nil(t, err) return pool, nil diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 650579f9b..021ca74e0 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -110,7 +110,9 @@ func TestMain(m *testing.M) { sm, err := readSchema() if err != nil { log.Fatalf("Reading schema: %v", err) - } else if err := schema.NewMigrator().Apply(db, sm); err != nil { + } + migrator := schema.NewMigrator() + if err := migrator.Apply(db, sm); err != nil { log.Fatalf("Applying schema: %v", err) } From 4da0a4b8ed7dca561600594ccc3986fc44e58e1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Dec 2021 12:36:35 +0000 Subject: [PATCH 19/33] build(deps): Bump github.com/adlio/schema from 1.2.2 to 1.2.3 (#7432) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/adlio/schema](https://github.com/adlio/schema) 
from 1.2.2 to 1.2.3.
Release notes

Sourced from github.com/adlio/schema's releases.

1.2.3

What's Changed

Full Changelog: https://github.com/adlio/schema/compare/v1.2.2...v1.2.3

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/adlio/schema&package-manager=go_modules&previous-version=1.2.2&new-version=1.2.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 671ed3798..3332cd16b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/BurntSushi/toml v0.4.1 - github.com/adlio/schema v1.2.2 + github.com/adlio/schema v1.2.3 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect diff --git a/go.sum b/go.sum index 32de9ee79..95e1fcf34 100644 --- a/go.sum +++ b/go.sum @@ -90,8 +90,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.2.2 h1:moF5Ncfn7tz9ft07kJmtGoxU4DRb8hvGjCtzgqQMdoI= -github.com/adlio/schema v1.2.2/go.mod h1:nD7ZWmMMbwU12Pqwg+qL0rTvHBrBXfNz+5UQxTfy38M= +github.com/adlio/schema v1.2.3 h1:GfKThfEsjS9cCz7gaF8zdXv4cpTdUqdljkKGDTbJjys= +github.com/adlio/schema v1.2.3/go.mod h1:nD7ZWmMMbwU12Pqwg+qL0rTvHBrBXfNz+5UQxTfy38M= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= From f80c235842c337eb3c93064e78380daa86915ef1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Dec 2021 14:27:38 +0000 Subject: [PATCH 20/33] build(deps): Bump github.com/spf13/viper from 1.9.0 to 1.10.0 (#7434) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.9.0 to 1.10.0.
Release notes

Sourced from github.com/spf13/viper's releases.

v1.10.0

This is a maintenance release primarily containing minor fixes and improvements.

Changes

Added

  • Experimental finder based on io/fs
  • Tests are executed on Windows
  • Tests are executed on Go 1.17
  • Logger interface to decouple Viper from JWW

In addition to the above changes, this release comes with minor improvements, documentation changes and dependency updates.

Many thanks to everyone who contributed to this release!

Commits
  • a4bfcd9 chore(deps): update crypt
  • 1cb6606 build(deps): bump gopkg.in/ini.v1 from 1.65.0 to 1.66.2
  • a785a79 refactor: replace jww with the new logger interface
  • f1f6b21 feat: add logger interface and default implementation
  • c43197d build(deps): bump github.com/mitchellh/mapstructure from 1.4.2 to 1.4.3
  • 2abe0dd build(deps): bump gopkg.in/ini.v1 from 1.64.0 to 1.65.0
  • 8ec82f8 chore(deps): update crypt
  • 35877c8 chore: fix lint
  • 655a0aa chore(deps): update golangci-lint
  • 946ae75 ci: fix github script
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/viper&package-manager=go_modules&previous-version=1.9.0&new-version=1.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 60 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 3332cd16b..e72ed9355 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.9.0 + github.com/spf13/viper v1.10.0 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.6 github.com/vektra/mockery/v2 v2.9.4 diff --git a/go.sum b/go.sum index 95e1fcf34..95957ff3e 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,10 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -38,6 +42,7 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.6.0/go.mod 
h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -111,6 +116,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= @@ -162,6 +168,7 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -187,7 +194,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= @@ -247,8 +256,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod 
h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/esimonov/ifshort v1.0.3 h1:JD6x035opqGec5fZ0TLjXeROD2p5H7oLGn8MKfy9HTM= github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= @@ -454,6 +465,7 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= @@ -501,6 +513,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqC github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -510,6 +523,7 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng 
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -535,14 +549,18 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.4.0/go.mod 
h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -632,6 +650,7 @@ github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOS github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= @@ -647,8 +666,9 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -678,6 +698,7 @@ github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -696,8 +717,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -873,6 +895,7 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= @@ -910,6 +933,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -932,8 +956,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.9.0 
h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= +github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1027,8 +1052,11 @@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1169,12 +1197,14 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod 
h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b h1:MWaHNqZy3KTpuTMAGvv+Kw+ylsEpmyJZizz1dqxnu28= golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1194,6 +1224,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1293,11 +1325,15 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1461,7 +1497,12 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1528,8 +1569,19 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1560,6 +1612,7 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1593,8 +1646,9 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= From 4e2aa63bb35f1634d917b5655c45a91432dd26d5 Mon Sep 17 00:00:00 2001 From: Jacob Gadikian Date: Mon, 13 Dec 2021 21:55:57 +0700 Subject: [PATCH 21/33] Go 1.17 (#7429) Update tendermint to Go 1.17 because imports are easier to audit. * Update README.md * go mod tidy Co-authored-by: M. J. Fromberger --- README.md | 2 +- go.mod | 174 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 170 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 4082752ad..12ac9084b 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. 
You can subscribe | Requirement | Notes | |-------------|------------------| -| Go version | Go1.16 or higher | +| Go version | Go1.17 or higher | ## Documentation diff --git a/go.mod b/go.mod index e72ed9355..2e222e9fe 100644 --- a/go.mod +++ b/go.mod @@ -1,15 +1,12 @@ module github.com/tendermint/tendermint -go 1.16 +go 1.17 require ( github.com/BurntSushi/toml v0.4.1 github.com/adlio/schema v1.2.3 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 @@ -40,6 +37,173 @@ require ( golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.42.0 - gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) + +require ( + 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Antonboom/errname v0.1.5 // indirect + github.com/Antonboom/nilnil v0.1.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DataDog/zstd v1.4.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/OpenPeeDeeP/depguard v1.0.1 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/ashanbrown/forbidigo v1.2.0 // indirect + github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen 
v0.3.0 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/breml/bidichk v0.1.1 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/charithe/durationcheck v0.0.9 // indirect + github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect + github.com/containerd/continuity v0.2.1 // indirect + github.com/daixiang0/gci v0.2.9 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingajkin/go-header v0.4.2 // indirect + github.com/dgraph-io/badger/v2 v2.2007.2 // indirect + github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/esimonov/ifshort v1.0.3 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/fzipp/gocyclo v0.3.1 // indirect + github.com/go-critic/go-critic v0.6.1 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.0 // indirect + github.com/go-toolsmith/astequal v1.0.1 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt 
v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/btree v1.0.0 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d // indirect + github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kulti/thelper v0.4.0 // indirect + github.com/kunwardeep/paralleltest v1.0.3 // 
indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.2 // indirect + github.com/ldez/tagliatelle v0.2.0 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/maratori/testpackage v1.0.1 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect + github.com/mgechev/revive v1.1.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/moricho/tparallel v0.2.1 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.2.3 // indirect + github.com/nishanths/predeclared v0.2.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.0.3 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.30.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/quasilyte/go-ruleguard v0.3.13 // indirect + github.com/quasilyte/regex/syntax 
v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/ryancurrah/gomodguard v1.2.3 // indirect + github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect + github.com/securego/gosec/v2 v2.9.1 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sivchari/tenv v1.4.7 // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/sylvia7788/contextcheck v1.0.4 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect + github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 // indirect + github.com/tomarrell/wrapcheck/v2 v2.4.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.4.0 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.4 // indirect + github.com/uudashr/gocognit v1.0.5 // indirect + github.com/yeya24/promlinter v0.1.0 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + golang.org/x/mod v0.5.0 // indirect + golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.7 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto 
v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + honnef.co/go/tools v0.2.1 // indirect + mvdan.cc/gofumpt v0.1.1 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 // indirect +) From 65c0aaee5e7d55b79535ad275faac288177c5f07 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 13 Dec 2021 11:04:44 -0500 Subject: [PATCH 22/33] p2p: use recieve for channel iteration (#7425) --- internal/blocksync/reactor.go | 40 ++++++----- internal/consensus/reactor.go | 113 ++++++++++++++----------------- internal/evidence/reactor.go | 28 ++++---- internal/mempool/reactor.go | 28 ++++---- internal/p2p/channel.go | 32 ++++----- internal/p2p/channel_test.go | 2 +- internal/p2p/p2ptest/network.go | 2 +- internal/p2p/p2ptest/require.go | 71 +++++++++++-------- internal/p2p/pex/reactor.go | 78 ++++++++++++--------- internal/p2p/pex/reactor_test.go | 54 ++++++++------- internal/p2p/pqueue.go | 8 +-- internal/p2p/pqueue_test.go | 2 +- internal/p2p/router.go | 6 +- internal/p2p/router_test.go | 74 ++++++++++---------- internal/statesync/reactor.go | 42 ++++++------ 15 files changed, 291 insertions(+), 289 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 53a63fb84..7ff785c81 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -166,6 +166,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { } go r.processBlockSyncCh(ctx) + go r.processBlockSyncBridge(ctx) go r.processPeerUpdates(ctx) return nil @@ -212,7 +213,7 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, // 
handleBlockSyncMessage handles envelopes sent from peers on the // BlockSyncChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope *p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -251,7 +252,7 @@ func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope p2p.Envel // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -282,25 +283,30 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
func (r *Reactor) processBlockSyncCh(ctx context.Context) { + iter := r.blockSyncCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.blockSyncCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + + r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return + } + } + } +} + +func (r *Reactor) processBlockSyncBridge(ctx context.Context) { for { select { case <-ctx.Done(): - r.logger.Debug("stopped listening on block sync channel; closing...") return - case envelope := <-r.blockSyncCh.In: - if err := r.handleMessage(ctx, r.blockSyncCh.ID, envelope); err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } - - r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) - if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } - } case envelope := <-r.blockSyncOutBridgeCh: if err := r.blockSyncCh.Send(ctx, envelope); err != nil { return diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 88a831ede..ad6a108be 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1070,7 +1070,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleStateMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") @@ -1156,7 +1156,7 @@ func (r *Reactor) handleStateMessage(ctx context.Context, envelope p2p.Envelope, // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleDataMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "DataChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1205,7 +1205,7 @@ func (r *Reactor) handleDataMessage(ctx context.Context, envelope p2p.Envelope, // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleVoteMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleVoteMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "VoteChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1246,7 +1246,7 @@ func (r *Reactor) handleVoteMessage(ctx context.Context, envelope p2p.Envelope, // VoteSetBitsChannel. If we fail to find the peer state for the envelope sender, // we perform a no-op and return. This can happen when we process the envelope // after the peer is removed. 
-func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") ps, ok := r.GetPeerState(envelope.From) @@ -1304,7 +1304,7 @@ func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope p2p.Env // the p2p channel. // // NOTE: We block on consensus state for proposals, block parts, and votes. -func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -1359,20 +1359,16 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
func (r *Reactor) processStateCh(ctx context.Context) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("stopped listening on StateChannel; closing...") - return - case envelope := <-r.stateCh.In: - if err := r.handleMessage(ctx, r.stateCh.ID, envelope); err != nil { - r.logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) - if serr := r.stateCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := r.stateCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.stateCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) + if serr := r.stateCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } @@ -1384,20 +1380,16 @@ func (r *Reactor) processStateCh(ctx context.Context) { // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
func (r *Reactor) processDataCh(ctx context.Context) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("stopped listening on DataChannel; closing...") - return - case envelope := <-r.dataCh.In: - if err := r.handleMessage(ctx, r.dataCh.ID, envelope); err != nil { - r.logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) - if serr := r.dataCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := r.dataCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.dataCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) + if serr := r.dataCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } @@ -1409,20 +1401,16 @@ func (r *Reactor) processDataCh(ctx context.Context) { // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
func (r *Reactor) processVoteCh(ctx context.Context) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("stopped listening on VoteChannel; closing...") - return - case envelope := <-r.voteCh.In: - if err := r.handleMessage(ctx, r.voteCh.ID, envelope); err != nil { - r.logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) - if serr := r.voteCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := r.voteCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.voteCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) + if serr := r.voteCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } @@ -1434,24 +1422,21 @@ func (r *Reactor) processVoteCh(ctx context.Context) { // When the reactor is stopped, we will catch the signal and close the p2p // Channel gracefully. 
func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("stopped listening on VoteSetBitsChannel; closing...") - return - case envelope := <-r.voteSetBitsCh.In: - if err := r.handleMessage(ctx, r.voteSetBitsCh.ID, envelope); err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } + iter := r.voteSetBitsCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() - r.logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) - if serr := r.voteSetBitsCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + if err := r.handleMessage(ctx, r.voteSetBitsCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + + r.logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) + if serr := r.voteSetBitsCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 7302773ae..62272a810 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -110,7 +110,7 @@ func (r *Reactor) OnStop() { // It returns an error only if the Envelope.Message is unknown for this channel // or if the given evidence is invalid. This should never be called outside of // handleMessage. -func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleEvidenceMessage(envelope *p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -146,7 +146,7 @@ func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. 
// It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -174,20 +174,16 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // processEvidenceCh implements a blocking event loop where we listen for p2p // Envelope messages from the evidenceCh. func (r *Reactor) processEvidenceCh(ctx context.Context) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("stopped listening on evidence channel; closing...") - return - case envelope := <-r.evidenceCh.In: - if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { - r.logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) - if serr := r.evidenceCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := r.evidenceCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) + if serr := r.evidenceCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 2e1a94f01..7119cdbbb 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -140,7 +140,7 @@ func (r *Reactor) OnStop() { // For every tx in the message, we execute CheckTx. It returns an error if an // empty set of txs are sent in an envelope or if we receive an unexpected // message type. 
-func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope *p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -171,7 +171,7 @@ func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope p2p.Envelop // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { r.observePanic(e) @@ -200,21 +200,17 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop // processMempoolCh implements a blocking event loop where we listen for p2p // Envelope messages from the mempoolCh. 
func (r *Reactor) processMempoolCh(ctx context.Context) { - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(ctx, r.mempoolCh.ID, envelope); err != nil { - r.logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - if serr := r.mempoolCh.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := r.mempoolCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.mempoolCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) + if serr := r.mempoolCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - case <-ctx.Done(): - r.logger.Debug("stopped listening on mempool channel; closing...") - return } } } diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index da6955596..1faa2a6d0 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -15,15 +15,7 @@ type Envelope struct { To types.NodeID // receiver (empty if inbound) Broadcast bool // send to all connected peers (ignores To) Message proto.Message // message payload - - // channelID is for internal Router use, set on outbound messages to inform - // the sendPeer() goroutine which transport channel to use. - // - // FIXME: If we migrate the Transport API to a byte-oriented multi-stream - // API, this will no longer be necessary since each channel will be mapped - // onto a stream during channel/peer setup. See: - // https://github.com/tendermint/spec/pull/227 - channelID ChannelID + ChannelID ChannelID } // Wrapper is a Protobuf message that can contain a variety of inner messages @@ -62,7 +54,7 @@ func (pe PeerError) Unwrap() error { return pe.Err } // Each message is wrapped in an Envelope to specify its sender and receiver. 
type Channel struct { ID ChannelID - In <-chan Envelope // inbound messages (peers to reactors) + inCh <-chan Envelope // inbound messages (peers to reactors) outCh chan<- Envelope // outbound messages (reactors to peers) errCh chan<- PeerError // peer error reporting @@ -81,7 +73,7 @@ func NewChannel( return &Channel{ ID: id, messageType: messageType, - In: inCh, + inCh: inCh, outCh: outCh, errCh: errCh, } @@ -138,7 +130,7 @@ func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) { select { case <-ctx.Done(): return - case envelope := <-ch.In: + case envelope := <-ch.inCh: select { case <-ctx.Done(): return @@ -192,6 +184,14 @@ func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterato } wg := new(sync.WaitGroup) + for _, ch := range chs { + wg.Add(1) + go func(ch *Channel) { + defer wg.Done() + iteratorWorker(ctx, ch, iter.pipe) + }(ch) + } + done := make(chan struct{}) go func() { defer close(done); wg.Wait() }() @@ -204,13 +204,5 @@ func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterato <-done }() - for _, ch := range chs { - wg.Add(1) - go func(ch *Channel) { - defer wg.Done() - iteratorWorker(ctx, ch, iter.pipe) - }(ch) - } - return iter } diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go index 525eb18fb..e06e3e77e 100644 --- a/internal/p2p/channel_test.go +++ b/internal/p2p/channel_test.go @@ -23,7 +23,7 @@ func testChannel(size int) (*channelInternal, *Channel) { Error: make(chan PeerError, size), } ch := &Channel{ - In: in.In, + inCh: in.In, outCh: in.Out, errCh: in.Error, } diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index bde96ba66..3117472be 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -306,7 +306,7 @@ func (n *Node) MakeChannel( require.NoError(t, err) require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID)) t.Cleanup(func() { - RequireEmpty(t, channel) + RequireEmpty(ctx, t, 
channel) cancel() }) return channel diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 22a1d2a81..f492ff09e 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -7,6 +7,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" @@ -14,53 +15,63 @@ import ( ) // RequireEmpty requires that the given channel is empty. -func RequireEmpty(t *testing.T, channels ...*p2p.Channel) { - for _, channel := range channels { - select { - case e := <-channel.In: - require.Fail(t, "unexpected message", "channel %v should be empty, got %v", channel.ID, e) - case <-time.After(10 * time.Millisecond): - } +func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { + t.Helper() + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + + iter := p2p.MergedChannelIterator(ctx, channels...) + count := 0 + for iter.Next(ctx) { + count++ + require.Nil(t, iter.Envelope()) } + require.Zero(t, count) + require.Error(t, ctx.Err()) } // RequireReceive requires that the given envelope is received on the channel. 
-func RequireReceive(t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { +func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { t.Helper() - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - select { - case e := <-channel.In: - require.Equal(t, expect, e) - case <-timer.C: - require.Fail(t, "timed out waiting for message", "%v on channel %v", expect, channel.ID) + iter := channel.Receive(ctx) + count := 0 + for iter.Next(ctx) { + count++ + envelope := iter.Envelope() + require.Equal(t, expect.From, envelope.From) + require.Equal(t, expect.Message, envelope.Message) + } + + if !assert.True(t, count >= 1) { + require.NoError(t, ctx.Err(), "timed out waiting for message %v", expect) } } // RequireReceiveUnordered requires that the given envelopes are all received on // the channel, ignoring order. -func RequireReceiveUnordered(t *testing.T, channel *p2p.Channel, expect []p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() +func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - actual := []p2p.Envelope{} - for { - select { - case e := <-channel.In: - actual = append(actual, e) - if len(actual) == len(expect) { - require.ElementsMatch(t, expect, actual) - return - } + actual := []*p2p.Envelope{} - case <-timer.C: - require.ElementsMatch(t, expect, actual) + iter := channel.Receive(ctx) + for iter.Next(ctx) { + actual = append(actual, iter.Envelope()) + if len(actual) == len(expect) { + require.ElementsMatch(t, expect, actual, "len=%d", len(actual)) return } } + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.ElementsMatch(t, expect, actual) + } } // RequireSend requires that the given envelope 
is sent on the channel. @@ -88,7 +99,7 @@ func RequireSendReceive( receive proto.Message, ) { RequireSend(ctx, t, channel, p2p.Envelope{To: peerID, Message: send}) - RequireReceive(t, channel, p2p.Envelope{From: peerID, Message: send}) + RequireReceive(ctx, t, channel, p2p.Envelope{From: peerID, Message: send}) } // RequireNoUpdates requires that a PeerUpdates subscription is empty. diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index f5eb2ab7f..fe57dc85b 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -100,9 +100,6 @@ type Reactor struct { // minReceiveRequestInterval). lastReceivedRequests map[types.NodeID]time.Time - // the time when another request will be sent - nextRequestTime time.Time - // keep track of how many new peers to existing peers we have received to // extrapolate the size of the network newPeers uint32 @@ -155,8 +152,26 @@ func (r *Reactor) OnStop() {} func (r *Reactor) processPexCh(ctx context.Context) { timer := time.NewTimer(0) defer timer.Stop() + var ( + duration = r.calculateNextRequestTime() + err error + ) + + incoming := make(chan *p2p.Envelope) + go func() { + defer close(incoming) + iter := r.pexCh.Receive(ctx) + for iter.Next(ctx) { + select { + case <-ctx.Done(): + return + case incoming <- iter.Envelope(): + } + } + }() + for { - timer.Reset(time.Until(r.nextRequestTime)) + timer.Reset(duration) select { case <-ctx.Done(): @@ -165,12 +180,15 @@ func (r *Reactor) processPexCh(ctx context.Context) { // outbound requests for new peers case <-timer.C: - r.sendRequestForPeers(ctx) - + duration, err = r.sendRequestForPeers(ctx) + if err != nil { + return + } // inbound requests for new peers or responses to requests sent by this // reactor - case envelope := <-r.pexCh.In: - if err := r.handleMessage(ctx, r.pexCh.ID, envelope); err != nil { + case envelope := <-incoming: + duration, err = r.handleMessage(ctx, r.pexCh.ID, envelope) + if err != nil { r.logger.Error("failed to process 
message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) if serr := r.pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, @@ -179,6 +197,7 @@ func (r *Reactor) processPexCh(ctx context.Context) { return } } + } } } @@ -199,7 +218,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) { } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *Reactor) handlePexMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) (time.Duration, error) { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -207,7 +226,7 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope p2p.Envelope) e // check if the peer hasn't sent a prior request too close to this one // in time if err := r.markPeerRequest(envelope.From); err != nil { - return err + return time.Minute, err } // request peers from the peer manager and parse the NodeAddresses into @@ -223,18 +242,19 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope p2p.Envelope) e To: envelope.From, Message: &protop2p.PexResponse{Addresses: pexAddresses}, }); err != nil { - return err + return 0, err } + return time.Second, nil case *protop2p.PexResponse: // check if the response matches a request that was made to that peer if err := r.markPeerResponse(envelope.From); err != nil { - return err + return time.Minute, err } // check the size of the response if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", + return 10 * time.Minute, fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", maxAddresses, len(msg.Addresses), ) @@ -256,17 +276,16 @@ func (r *Reactor) handlePexMessage(ctx context.Context, envelope p2p.Envelope) e r.totalPeers++ } + return 10 * time.Minute, nil default: - return fmt.Errorf("received unknown message: %T", msg) + return time.Second, 
fmt.Errorf("received unknown message: %T", msg) } - - return nil } // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (duration time.Duration, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -282,13 +301,12 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop switch chID { case p2p.ChannelID(PexChannel): - err = r.handlePexMessage(ctx, envelope) - + duration, err = r.handlePexMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } - return err + return } // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we @@ -314,15 +332,13 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // peer a request for more peer addresses. 
The function then moves the // peer into the requestsSent bucket and calculates when the next request // time should be -func (r *Reactor) sendRequestForPeers(ctx context.Context) { +func (r *Reactor) sendRequestForPeers(ctx context.Context) (time.Duration, error) { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { // no peers are available r.logger.Debug("no available peers to send request to, waiting...") - r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod) - - return + return noAvailablePeersWaitPeriod, nil } var peerID types.NodeID @@ -336,15 +352,16 @@ func (r *Reactor) sendRequestForPeers(ctx context.Context) { To: peerID, Message: &protop2p.PexRequest{}, }); err != nil { - return + return 0, err } // remove the peer from the abvailable peers list and mark it in the requestsSent map delete(r.availablePeers, peerID) r.requestsSent[peerID] = struct{}{} - r.calculateNextRequestTime() - r.logger.Debug("peer request sent", "next_request_time", r.nextRequestTime) + dur := r.calculateNextRequestTime() + r.logger.Debug("peer request sent", "next_request_time", dur) + return dur, nil } // calculateNextRequestTime implements something of a proportional controller @@ -357,14 +374,13 @@ func (r *Reactor) sendRequestForPeers(ctx context.Context) { // new nodes will plummet to a very small number, meaning the interval expands // to its upper bound. // CONTRACT: Must use a write lock as nextRequestTime is updated -func (r *Reactor) calculateNextRequestTime() { +func (r *Reactor) calculateNextRequestTime() time.Duration { // check if the peer store is full. 
If so then there is no need // to send peer requests too often if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { r.logger.Debug("peer manager near full ratio, sleeping...", "sleep_period", fullCapacityInterval, "ratio", ratio) - r.nextRequestTime = time.Now().Add(fullCapacityInterval) - return + return fullCapacityInterval } // baseTime represents the shortest interval that we can send peer requests @@ -390,7 +406,7 @@ func (r *Reactor) calculateNextRequestTime() { } // NOTE: As ratio is always >= 1, discovery ratio is >= 1. Therefore we don't need to worry // about the next request time being less than the minimum time - r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) + return baseTime * time.Duration(r.discoveryRatio) } func (r *Reactor) markPeerRequest(peer types.NodeID) error { diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 3f0adcf89..d83b6d3af 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -2,6 +2,7 @@ package pex_test import ( "context" + "errors" "strings" "testing" "time" @@ -41,12 +42,12 @@ func TestReactorBasic(t *testing.T) { testNet.start(ctx, t) // assert that the mock node receives a request from the real node - testNet.listenForRequest(t, secondNode, firstNode, shortWait) + testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait) // assert that when a mock node sends a request it receives a response (and // the correct one) testNet.sendRequest(ctx, t, firstNode, secondNode) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) + testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { @@ -440,38 +441,42 @@ func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int } func (r *reactorTestSuite) listenFor( + ctx context.Context, t *testing.T, node types.NodeID, - conditional 
func(msg p2p.Envelope) bool, - assertion func(t *testing.T, msg p2p.Envelope) bool, + conditional func(msg *p2p.Envelope) bool, + assertion func(t *testing.T, msg *p2p.Envelope) bool, waitPeriod time.Duration, ) { - timesUp := time.After(waitPeriod) - for { - select { - case envelope := <-r.pexChannels[node].In: - if conditional(envelope) && assertion(t, envelope) { - return - } - case <-timesUp: - require.Fail(t, "timed out waiting for message", - "node=%v, waitPeriod=%s", node, waitPeriod) + ctx, cancel := context.WithTimeout(ctx, waitPeriod) + defer cancel() + iter := r.pexChannels[node].Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if conditional(envelope) && assertion(t, envelope) { + return } } + + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.Fail(t, "timed out waiting for message", + "node=%v, waitPeriod=%s", node, waitPeriod) + } + } -func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { +func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { r.logger.Info("Listening for request", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { + conditional := func(msg *p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { + assertion := func(t *testing.T, msg *p2p.Envelope) bool { require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) pingAndlistenForNAddresses( @@ -484,11 +489,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( t.Helper() r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) - 
conditional := func(msg p2p.Envelope) bool { + conditional := func(msg *p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { + assertion := func(t *testing.T, msg *p2p.Envelope) bool { m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") @@ -505,10 +510,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( return false } r.sendRequest(ctx, t, toNode, fromNode) - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, @@ -516,16 +522,16 @@ func (r *reactorTestSuite) listenForResponse( ) { r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { + conditional := func(msg *p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) r.logger.Info("message", msg, "ok", ok) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { + assertion := func(t *testing.T, msg *p2p.Envelope) bool { require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForPeerUpdate( diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index b43bb806f..ebfa2885b 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -160,11 +160,11 @@ func (s *pqScheduler) process(ctx context.Context) { for { select { case e := <-s.enqueueCh: - chIDStr := strconv.Itoa(int(e.channelID)) + chIDStr := strconv.Itoa(int(e.ChannelID)) pqEnv := &pqEnvelope{ envelope: e, size: uint(proto.Size(e.Message)), - priority: 
s.chPriorities[e.channelID], + priority: s.chPriorities[e.ChannelID], timestamp: time.Now().UTC(), } @@ -203,7 +203,7 @@ func (s *pqScheduler) process(ctx context.Context) { if tmpSize+pqEnv.size <= s.capacity { canEnqueue = true } else { - pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.channelID)) + pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID)) s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1) s.logger.Debug( "dropped envelope", @@ -277,7 +277,7 @@ func (s *pqScheduler) process(ctx context.Context) { } func (s *pqScheduler) push(pqEnv *pqEnvelope) { - chIDStr := strconv.Itoa(int(pqEnv.envelope.channelID)) + chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID)) // enqueue the incoming Envelope heap.Push(s.pq, pqEnv) diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 181e6e7f7..03841d000 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -20,7 +20,7 @@ func TestCloseWhileDequeueFull(t *testing.T) { for i := 0; i < enqueueLength; i++ { pqueue.enqueue() <- Envelope{ - channelID: 0x01, + ChannelID: 0x01, Message: &testMessage{Value: "foo"}, // 5 bytes } } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 87842bee6..c18ac2c85 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -328,7 +328,7 @@ func (r *Router) routeChannel( // Mark the envelope with the channel ID to allow sendPeer() to pass // it on to Transport.SendMessage(). 
- envelope.channelID = chID + envelope.ChannelID = chID // wrap the message in a wrapper message, if requested if wrapper != nil { @@ -859,7 +859,7 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn start := time.Now().UTC() select { - case queue.enqueue() <- Envelope{From: peerID, Message: msg}: + case queue.enqueue() <- Envelope{From: peerID, Message: msg, ChannelID: chID}: r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), "peer_id", string(peerID), @@ -895,7 +895,7 @@ func (r *Router) sendPeer(ctx context.Context, peerID types.NodeID, conn Connect continue } - if err = conn.SendMessage(ctx, envelope.channelID, bz); err != nil { + if err = conn.SendMessage(ctx, envelope.ChannelID, bz); err != nil { return err } diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index a6d5fdc03..e4d78529a 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -28,17 +28,14 @@ import ( ) func echoReactor(ctx context.Context, channel *p2p.Channel) { - for { - select { - case envelope := <-channel.In: - value := envelope.Message.(*p2ptest.Message).Value - if err := channel.Send(ctx, p2p.Envelope{ - To: envelope.From, - Message: &p2ptest.Message{Value: value}, - }); err != nil { - return - } - case <-ctx.Done(): + iter := channel.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + value := envelope.Message.(*p2ptest.Message).Value + if err := channel.Send(ctx, p2p.Envelope{ + To: envelope.From, + Message: &p2ptest.Message{Value: value}, + }); err != nil { return } } @@ -76,14 +73,15 @@ func TestRouter_Network(t *testing.T) { Broadcast: true, Message: &p2ptest.Message{Value: "bar"}, }) - expect := []p2p.Envelope{} + expect := []*p2p.Envelope{} for _, peer := range peers { - expect = append(expect, p2p.Envelope{ - From: peer.NodeID, - Message: &p2ptest.Message{Value: "bar"}, + expect = append(expect, &p2p.Envelope{ + From: peer.NodeID, + ChannelID: 1, + Message: 
&p2ptest.Message{Value: "bar"}, }) } - p2ptest.RequireReceiveUnordered(t, channel, expect) + p2ptest.RequireReceiveUnordered(ctx, t, channel, expect) // We then submit an error for a peer, and watch it get disconnected and // then reconnected as the router retries it. @@ -162,7 +160,7 @@ func TestRouter_Channel_Basic(t *testing.T) { To: selfID, Message: &p2ptest.Message{Value: "self"}, }) - p2ptest.RequireEmpty(t, channel) + p2ptest.RequireEmpty(ctx, t, channel) } // Channel tests are hairy to mock, so we use an in-memory network instead. @@ -186,45 +184,45 @@ func TestRouter_Channel_SendReceive(t *testing.T) { // Sending a message a->b should work, and not send anything // further to a, b, or c. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a nil message a->b should be dropped. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: nil}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a different message type should be dropped. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to an unknown peer should be dropped. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "a"}, }) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending without a recipient should be dropped. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to self should be dropped. 
p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Removing b and sending to it should be dropped. network.Remove(ctx, t, bID) p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // After all this, sending a message c->a should work. p2ptest.RequireSend(ctx, t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // None of these messages should have made it onto the other channels. for _, other := range otherChannels { - p2ptest.RequireEmpty(t, other) + p2ptest.RequireEmpty(ctx, t, other) } } @@ -246,17 +244,17 @@ func TestRouter_Channel_Broadcast(t *testing.T) { // Sending a broadcast from b should work. 
p2ptest.RequireSend(ctx, t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) // Removing one node from the network shouldn't prevent broadcasts from working. network.Remove(ctx, t, dID) p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) } func TestRouter_Channel_Wrapper(t *testing.T) { @@ -287,11 +285,11 @@ func TestRouter_Channel_Wrapper(t *testing.T) { // should automatically wrap and unwrap sent messages -- we prepend the // wrapper actions to the message value to signal this. 
p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) // If we send a different message that can't be wrapped, it should be dropped. p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, b) + p2ptest.RequireEmpty(ctx, t, b) // If we send the wrapper message itself, it should also be passed through // since WrapperMessage supports it, and should only be unwrapped at the receiver. @@ -299,7 +297,7 @@ func TestRouter_Channel_Wrapper(t *testing.T) { To: bID, Message: &wrapperMessage{Message: p2ptest.Message{Value: "foo"}}, }) - p2ptest.RequireReceive(t, b, p2p.Envelope{ + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{ From: aID, Message: &p2ptest.Message{Value: "unwrap:foo"}, }) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 09716fb23..34281919e 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -503,7 +503,7 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope) error { logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -575,7 +575,7 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope p2p.Envelo // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. 
// It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. -func (r *Reactor) handleChunkMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: r.logger.Debug( @@ -664,7 +664,7 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope p2p.Envelope) return nil } -func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: r.logger.Info("received light block request", "height", msg.Height) @@ -718,7 +718,7 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope p2p.Enve return nil } -func (r *Reactor) handleParamsMessage(ctx context.Context, envelope p2p.Envelope) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: r.logger.Debug("received consensus params request", "height", msg.Height) @@ -765,7 +765,7 @@ func (r *Reactor) handleParamsMessage(ctx context.Context, envelope p2p.Envelope // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -800,24 +800,20 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop // the respective channel. When the reactor is stopped, we will catch the signal // and close the p2p Channel gracefully. func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) { - for { - select { - case <-ctx.Done(): - r.logger.Debug("channel closed", "channel", chName) - return - case envelope := <-ch.In: - if err := r.handleMessage(ctx, ch.ID, envelope); err != nil { - r.logger.Error("failed to process message", - "err", err, - "channel", chName, - "ch_id", ch.ID, - "envelope", envelope) - if serr := ch.SendError(ctx, p2p.PeerError{ - NodeID: envelope.From, - Err: err, - }); serr != nil { - return - } + iter := ch.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, ch.ID, envelope); err != nil { + r.logger.Error("failed to process message", + "err", err, + "channel", chName, + "ch_id", ch.ID, + "envelope", envelope) + if serr := ch.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } From 2b35d8191ca98cc5adbdfbe3e34f62fdfde8fe3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Dec 2021 12:51:47 -0500 Subject: [PATCH 23/33] build(deps-dev): Bump watchpack from 2.3.0 to 2.3.1 in /docs (#7430) Bumps [watchpack](https://github.com/webpack/watchpack) from 2.3.0 to 2.3.1. 
- [Release notes](https://github.com/webpack/watchpack/releases) - [Commits](https://github.com/webpack/watchpack/compare/v2.3.0...v2.3.1) --- updated-dependencies: - dependency-name: watchpack dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sam Kleinman Co-authored-by: M. J. Fromberger --- docs/package-lock.json | 6 +++--- docs/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 204ffc9ba..1a5216735 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10069,9 +10069,9 @@ } }, "watchpack": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.0.tgz", - "integrity": "sha512-MnN0Q1OsvB/GGHETrFeZPQaOelWh/7O+EiFlj8sM9GPjtQkis7k01aAxrg/18kTfoIVcLL+haEVFlXDaSRwKRw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", + "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", diff --git a/docs/package.json b/docs/package.json index 2ae446af9..39917dad1 100644 --- a/docs/package.json +++ b/docs/package.json @@ -7,7 +7,7 @@ "vuepress-theme-cosmos": "^1.0.182" }, "devDependencies": { - "watchpack": "^2.3.0" + "watchpack": "^2.3.1" }, "scripts": { "preserve": "./pre.sh", From d0e03f01fc160e13475ea8c677d6e156c12adf54 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 13 Dec 2021 13:35:32 -0500 Subject: [PATCH 24/33] sync: remove special mutexes (#7438) --- abci/client/client.go | 3 +-- abci/client/creators.go | 4 ++-- abci/client/grpc_client.go | 3 +-- abci/client/local_client.go | 8 ++++---- abci/client/socket_client.go | 4 ++-- abci/server/socket_server.go | 6 +++--- internal/blocksync/pool.go | 6 +++--- 
internal/consensus/byzantine_test.go | 3 +-- internal/consensus/common_test.go | 3 +-- internal/consensus/peer_state.go | 2 +- internal/consensus/reactor.go | 4 ++-- internal/consensus/reactor_test.go | 3 +-- internal/consensus/state.go | 4 ++-- internal/evidence/reactor.go | 2 +- internal/libs/clist/clist.go | 6 ++---- internal/libs/flowrate/flowrate.go | 5 ++--- internal/libs/sync/deadlock.go | 18 ------------------ internal/libs/sync/sync.go | 16 ---------------- internal/libs/tempfile/tempfile.go | 5 ++--- internal/libs/timer/throttle_timer.go | 5 ++--- internal/libs/timer/throttle_timer_test.go | 5 ++--- internal/mempool/cache.go | 4 ++-- internal/mempool/ids.go | 4 ++-- internal/mempool/mempool.go | 4 ++-- internal/mempool/priority_queue.go | 5 ++--- internal/mempool/reactor.go | 2 +- internal/mempool/tx.go | 6 +++--- internal/p2p/conn/connection.go | 4 ++-- internal/p2p/conn/secret_connection.go | 6 +++--- internal/statesync/chunks.go | 4 ++-- internal/statesync/reactor.go | 4 ++-- internal/statesync/snapshots.go | 4 ++-- internal/statesync/stateprovider.go | 6 +++--- internal/statesync/syncer.go | 4 ++-- internal/statesync/syncer_test.go | 3 +-- libs/events/events.go | 8 ++++---- libs/json/structs.go | 5 ++--- libs/json/types.go | 5 ++--- light/client.go | 3 +-- light/store/db/db.go | 4 ++-- privval/secret_connection.go | 6 +++--- privval/signer_endpoint.go | 4 ++-- privval/signer_listener_endpoint.go | 4 ++-- privval/signer_server.go | 4 ++-- rpc/client/http/ws.go | 4 ++-- rpc/jsonrpc/client/http_json_client.go | 6 +++--- rpc/jsonrpc/client/ws_client.go | 3 +-- rpc/jsonrpc/client/ws_client_test.go | 4 ++-- types/block.go | 4 ++-- types/part_set.go | 4 ++-- types/vote_set.go | 4 ++-- 51 files changed, 98 insertions(+), 149 deletions(-) delete mode 100644 internal/libs/sync/deadlock.go delete mode 100644 internal/libs/sync/sync.go diff --git a/abci/client/client.go b/abci/client/client.go index 1f0017557..d588922f6 100644 --- a/abci/client/client.go +++ 
b/abci/client/client.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -88,7 +87,7 @@ type ReqRes struct { *sync.WaitGroup *types.Response // Not set atomically, so be sure to use WaitGroup. - mtx tmsync.Mutex + mtx sync.Mutex done bool // Gets set to true once *after* WaitGroup.Done(). cb func(*types.Response) // A single callback that may be set. } diff --git a/abci/client/creators.go b/abci/client/creators.go index 7cabb2e43..c7220e928 100644 --- a/abci/client/creators.go +++ b/abci/client/creators.go @@ -2,9 +2,9 @@ package abciclient import ( "fmt" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" ) @@ -14,7 +14,7 @@ type Creator func(log.Logger) (Client, error) // NewLocalCreator returns a Creator for the given app, // which will be running locally. 
func NewLocalCreator(app types.Application) Creator { - mtx := new(tmsync.Mutex) + mtx := new(sync.Mutex) return func(_ log.Logger) (Client, error) { return NewLocalClient(mtx, app), nil diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 3f5da63f7..ee35646f9 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -10,7 +10,6 @@ import ( "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" @@ -27,7 +26,7 @@ type grpcClient struct { conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx tmsync.Mutex + mtx sync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks diff --git a/abci/client/local_client.go b/abci/client/local_client.go index f534a1716..8f2fab4e7 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -2,9 +2,9 @@ package abciclient import ( "context" + "sync" types "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" ) @@ -15,7 +15,7 @@ import ( type localClient struct { service.BaseService - mtx *tmsync.Mutex + mtx *sync.Mutex types.Application Callback } @@ -26,9 +26,9 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. 
-func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { +func NewLocalClient(mtx *sync.Mutex, app types.Application) Client { if mtx == nil { - mtx = new(tmsync.Mutex) + mtx = new(sync.Mutex) } cli := &localClient{ mtx: mtx, diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 562124e6c..84a851f4d 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -9,10 +9,10 @@ import ( "io" "net" "reflect" + "sync" "time" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" @@ -41,7 +41,7 @@ type socketClient struct { reqQueue chan *reqResWithContext - mtx tmsync.Mutex + mtx sync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. 
diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index dd71a5df8..eb959b5b4 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -7,9 +7,9 @@ import ( "io" "net" "runtime" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" @@ -25,11 +25,11 @@ type SocketServer struct { addr string listener net.Listener - connsMtx tmsync.Mutex + connsMtx sync.Mutex conns map[int]net.Conn nextConnID int - appMtx tmsync.Mutex + appMtx sync.Mutex app types.Application } diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 4db0fd900..a06c841fc 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" "math" + "sync" "sync/atomic" "time" "github.com/tendermint/tendermint/internal/libs/flowrate" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" @@ -73,7 +73,7 @@ type BlockPool struct { lastAdvance time.Time - mtx tmsync.RWMutex + mtx sync.RWMutex // block requests requesters map[int64]*bpRequester height int64 // the lowest key in requesters. 
@@ -560,7 +560,7 @@ type bpRequester struct { gotBlockCh chan struct{} redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat - mtx tmsync.Mutex + mtx sync.Mutex peerID types.NodeID block *types.Block } diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 3133e3659..a14af999b 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -16,7 +16,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" @@ -68,7 +67,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.Mutex) + mtx := new(sync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 27f9628d1..b8548cdc8 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -440,7 +439,7 @@ func newStateWithConfigAndBlockStore( blockStore *store.BlockStore, ) *State { // one for mempool, one for consensus - mtx := new(tmsync.Mutex) + mtx := new(sync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, 
app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 6a64e8e10..ada4b270e 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -40,7 +40,7 @@ type PeerState struct { logger log.Logger // NOTE: Modify below using setters, never directly. - mtx tmsync.RWMutex + mtx sync.RWMutex running bool PRS cstypes.PeerRoundState `json:"round_state"` Stats *peerStateStats `json:"stats"` diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index ad6a108be..e7ec24159 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" "runtime/debug" + "sync" "time" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" @@ -117,7 +117,7 @@ type Reactor struct { eventBus *eventbus.EventBus Metrics *Metrics - mtx tmsync.RWMutex + mtx sync.RWMutex peers map[types.NodeID]*PeerState waitSync bool diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 1788c0d20..ff218cb5f 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/eventbus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" @@ -392,7 +391,7 @@ func TestReactorWithEvidence(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for 
consensus - mtx := new(tmsync.Mutex) + mtx := new(sync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 051b7afba..f45088352 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -8,6 +8,7 @@ import ( "io" "os" "runtime/debug" + "sync" "time" "github.com/gogo/protobuf/proto" @@ -17,7 +18,6 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/libs/fail" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" tmjson "github.com/tendermint/tendermint/libs/json" @@ -100,7 +100,7 @@ type State struct { evpool evidencePool // internal state - mtx tmsync.RWMutex + mtx sync.RWMutex cstypes.RoundState state sm.State // State until height-1. // privValidator pubkey, memoized for the duration of one block diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 62272a810..385308884 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -53,7 +53,7 @@ type Reactor struct { peerWG sync.WaitGroup - mtx tmsync.Mutex + mtx sync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index 6cf515706..145c4e4f1 100644 --- a/internal/libs/clist/clist.go +++ b/internal/libs/clist/clist.go @@ -14,8 +14,6 @@ to ensure garbage collection of removed elements. import ( "fmt" "sync" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // MaxLength is the max allowed number of elements a linked list is @@ -44,7 +42,7 @@ waiting on NextWait() (since it's just a read operation). 
*/ type CElement struct { - mtx tmsync.RWMutex + mtx sync.RWMutex prev *CElement prevWg *sync.WaitGroup prevWaitCh chan struct{} @@ -220,7 +218,7 @@ func (e *CElement) SetRemoved() { // Operations are goroutine-safe. // Panics if length grows beyond the max. type CList struct { - mtx tmsync.RWMutex + mtx sync.RWMutex wg *sync.WaitGroup waitCh chan struct{} head *CElement // first element diff --git a/internal/libs/flowrate/flowrate.go b/internal/libs/flowrate/flowrate.go index 522c46cc7..2a053805c 100644 --- a/internal/libs/flowrate/flowrate.go +++ b/internal/libs/flowrate/flowrate.go @@ -8,14 +8,13 @@ package flowrate import ( "math" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // Monitor monitors and limits the transfer rate of a data stream. type Monitor struct { - mu tmsync.Mutex // Mutex guarding access to all internal fields + mu sync.Mutex // Mutex guarding access to all internal fields active bool // Flag indicating an active transfer start time.Duration // Transfer start time (clock() value) bytes int64 // Total number of bytes transferred diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go deleted file mode 100644 index 21b5130ba..000000000 --- a/internal/libs/sync/deadlock.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build deadlock -// +build deadlock - -package sync - -import ( - deadlock "github.com/sasha-s/go-deadlock" -) - -// A Mutex is a mutual exclusion lock. -type Mutex struct { - deadlock.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. -type RWMutex struct { - deadlock.RWMutex -} diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go deleted file mode 100644 index c6e7101c6..000000000 --- a/internal/libs/sync/sync.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !deadlock -// +build !deadlock - -package sync - -import "sync" - -// A Mutex is a mutual exclusion lock. 
-type Mutex struct { - sync.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. -type RWMutex struct { - sync.RWMutex -} diff --git a/internal/libs/tempfile/tempfile.go b/internal/libs/tempfile/tempfile.go index 0c594bb20..e30d5a8c6 100644 --- a/internal/libs/tempfile/tempfile.go +++ b/internal/libs/tempfile/tempfile.go @@ -7,9 +7,8 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) const ( @@ -32,7 +31,7 @@ const ( var ( atomicWriteFileRand uint64 - atomicWriteFileRandMu tmsync.Mutex + atomicWriteFileRandMu sync.Mutex ) func writeFileRandReseed() uint64 { diff --git a/internal/libs/timer/throttle_timer.go b/internal/libs/timer/throttle_timer.go index 3f21e3cc0..76db87ee8 100644 --- a/internal/libs/timer/throttle_timer.go +++ b/internal/libs/timer/throttle_timer.go @@ -1,9 +1,8 @@ package timer import ( + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) /* @@ -18,7 +17,7 @@ type ThrottleTimer struct { quit chan struct{} dur time.Duration - mtx tmsync.Mutex + mtx sync.Mutex timer *time.Timer isSet bool } diff --git a/internal/libs/timer/throttle_timer_test.go b/internal/libs/timer/throttle_timer_test.go index a56dcadfd..7ea392c3a 100644 --- a/internal/libs/timer/throttle_timer_test.go +++ b/internal/libs/timer/throttle_timer_test.go @@ -1,19 +1,18 @@ package timer import ( + "sync" "testing" "time" // make govet noshadow happy... 
asrt "github.com/stretchr/testify/assert" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) type thCounter struct { input chan struct{} - mtx tmsync.Mutex + mtx sync.Mutex count int } diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 3cd45d2bc..c69fc80dd 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -2,8 +2,8 @@ package mempool import ( "container/list" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -29,7 +29,7 @@ var _ TxCache = (*LRUTxCache)(nil) // LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache // only stores the hash of the raw transaction. type LRUTxCache struct { - mtx tmsync.Mutex + mtx sync.Mutex size int cacheMap map[types.TxKey]*list.Element list *list.List diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 656f5b74c..3788afcbc 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -2,13 +2,13 @@ package mempool import ( "fmt" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) type IDs struct { - mtx tmsync.RWMutex + mtx sync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index f5d1c926d..82aa3d7c7 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -6,13 +6,13 @@ import ( "errors" "fmt" "reflect" + "sync" "sync/atomic" "time" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath 
"github.com/tendermint/tendermint/libs/math" @@ -86,7 +86,7 @@ type TxMempool struct { // from the mempool. A read-lock is implicitly acquired when executing CheckTx, // however, a caller must explicitly grab a write-lock via Lock when updating // the mempool via Update(). - mtx tmsync.RWMutex + mtx sync.RWMutex preCheck PreCheckFunc postCheck PostCheckFunc } diff --git a/internal/mempool/priority_queue.go b/internal/mempool/priority_queue.go index f59715abb..e31997397 100644 --- a/internal/mempool/priority_queue.go +++ b/internal/mempool/priority_queue.go @@ -3,15 +3,14 @@ package mempool import ( "container/heap" "sort" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) var _ heap.Interface = (*TxPriorityQueue)(nil) // TxPriorityQueue defines a thread-safe priority queue for valid transactions. type TxPriorityQueue struct { - mtx tmsync.RWMutex + mtx sync.RWMutex txs []*WrappedTx } diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 7119cdbbb..14b52e917 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -57,7 +57,7 @@ type Reactor struct { // Reactor. observePanic is called with the recovered value. observePanic func(interface{}) - mtx tmsync.Mutex + mtx sync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index af48c9ccc..c7113c951 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -2,10 +2,10 @@ package mempool import ( "sort" + "sync" "time" "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -76,7 +76,7 @@ func (wtx *WrappedTx) Size() int { // access is not allowed. Regardless, it is not expected for the mempool to // need mutative access. 
type TxStore struct { - mtx tmsync.RWMutex + mtx sync.RWMutex hashTxs map[types.TxKey]*WrappedTx // primary index senderTxs map[string]*WrappedTx // sender is defined by the ABCI application } @@ -217,7 +217,7 @@ func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*Wrap // references which is used during Insert in order to determine sorted order. If // less returns true, a <= b. type WrappedTxList struct { - mtx tmsync.RWMutex + mtx sync.RWMutex txs []*WrappedTx less func(*WrappedTx, *WrappedTx) bool } diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index fa21358c1..a51585d3f 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -10,6 +10,7 @@ import ( "net" "reflect" "runtime/debug" + "sync" "sync/atomic" "time" @@ -17,7 +18,6 @@ import ( "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" @@ -100,7 +100,7 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. 
- stopMtx tmsync.Mutex + stopMtx sync.Mutex cancel context.CancelFunc diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index 35fac488a..f67c89e5b 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -25,7 +26,6 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -76,11 +76,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 2075adae5..6f6387637 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -6,9 +6,9 @@ import ( "os" "path/filepath" "strconv" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -28,7 +28,7 @@ type chunk struct { // iterator over all chunks, but callers can request chunks to be retried, optionally after // refetching. 
type chunkQueue struct { - tmsync.Mutex + sync.Mutex snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 34281919e..b161225a8 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -8,11 +8,11 @@ import ( "reflect" "runtime/debug" "sort" + "sync" "time" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -151,7 +151,7 @@ type Reactor struct { // These will only be set when a state sync is in progress. It is used to feed // received snapshots and chunks into the syncer and manage incoming and outgoing // providers. - mtx tmsync.RWMutex + mtx sync.RWMutex syncer *syncer providers map[types.NodeID]*BlockProvider stateProvider StateProvider diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index a0620e450..0e3bbb47a 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -6,8 +6,8 @@ import ( "math/rand" "sort" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -41,7 +41,7 @@ func (s *snapshot) Key() snapshotKey { // snapshotPool discovers and aggregates snapshots across peers. 
type snapshotPool struct { - tmsync.Mutex + sync.Mutex snapshots map[snapshotKey]*snapshot snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index 4f398ce77..b798eb9ad 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -6,11 +6,11 @@ import ( "errors" "fmt" "strings" + "sync" "time" dbm "github.com/tendermint/tm-db" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -40,7 +40,7 @@ type StateProvider interface { } type stateProviderRPC struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 providers map[lightprovider.Provider]string @@ -197,7 +197,7 @@ func rpcClient(server string) (*rpchttp.HTTP, error) { } type stateProviderP2P struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 paramsSendCh *p2p.Channel diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index b5ea158a4..a09d84d9f 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -5,11 +5,11 @@ import ( "context" "errors" "fmt" + "sync" "time" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -63,7 +63,7 @@ type syncer struct { fetchers int32 retryTimeout time.Duration - mtx tmsync.RWMutex + mtx sync.RWMutex chunks *chunkQueue metrics *Metrics diff --git a/internal/statesync/syncer_test.go 
b/internal/statesync/syncer_test.go index bd4640fe0..2e8556f68 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/proxy" proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" sm "github.com/tendermint/tendermint/internal/state" @@ -132,7 +131,7 @@ func TestSyncer_SyncAny(t *testing.T) { }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) chunkRequests := make(map[uint32]int) - chunkRequestsMtx := tmsync.Mutex{} + chunkRequestsMtx := sync.Mutex{} var wg sync.WaitGroup wg.Add(4) diff --git a/libs/events/events.go b/libs/events/events.go index 29ebd672f..b5b6f76df 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -4,8 +4,8 @@ package events import ( "context" "fmt" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" ) @@ -56,7 +56,7 @@ type EventSwitch interface { type eventSwitch struct { service.BaseService - mtx tmsync.RWMutex + mtx sync.RWMutex eventCells map[string]*eventCell listeners map[string]*eventListener } @@ -166,7 +166,7 @@ func (evsw *eventSwitch) FireEvent(ctx context.Context, event string, data Event // eventCell handles keeping track of listener callbacks for a given event. 
type eventCell struct { - mtx tmsync.RWMutex + mtx sync.RWMutex listeners map[string]EventCallback } @@ -213,7 +213,7 @@ type EventCallback func(ctx context.Context, data EventData) error type eventListener struct { id string - mtx tmsync.RWMutex + mtx sync.RWMutex removed bool events []string } diff --git a/libs/json/structs.go b/libs/json/structs.go index b9521114a..b20873c33 100644 --- a/libs/json/structs.go +++ b/libs/json/structs.go @@ -4,9 +4,8 @@ import ( "fmt" "reflect" "strings" + "sync" "unicode" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) var ( @@ -16,7 +15,7 @@ var ( // structCache is a cache of struct info. type structInfoCache struct { - tmsync.RWMutex + sync.RWMutex structInfos map[reflect.Type]*structInfo } diff --git a/libs/json/types.go b/libs/json/types.go index 9f21e81eb..9c9493056 100644 --- a/libs/json/types.go +++ b/libs/json/types.go @@ -4,8 +4,7 @@ import ( "errors" "fmt" "reflect" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) var ( @@ -39,7 +38,7 @@ type typeInfo struct { // types is a type registry. It is safe for concurrent use. type types struct { - tmsync.RWMutex + sync.RWMutex byType map[reflect.Type]*typeInfo byName map[string]*typeInfo } diff --git a/light/client.go b/light/client.go index 866de7627..99a44f498 100644 --- a/light/client.go +++ b/light/client.go @@ -9,7 +9,6 @@ import ( "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/light/provider" @@ -134,7 +133,7 @@ type Client struct { providerTimeout time.Duration // Mutex for locking during changes of the light clients providers - providerMutex tmsync.Mutex + providerMutex sync.Mutex // Primary provider of new headers. primary provider.Provider // Providers used to "witness" new headers. 
diff --git a/light/store/db/db.go b/light/store/db/db.go index acfda1f79..c364e1709 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -3,11 +3,11 @@ package db import ( "encoding/binary" "fmt" + "sync" "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/light/store" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -21,7 +21,7 @@ const ( type dbs struct { db dbm.DB - mtx tmsync.RWMutex + mtx sync.RWMutex size uint16 } diff --git a/privval/secret_connection.go b/privval/secret_connection.go index ffa5d36ed..9192c3114 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -25,7 +26,6 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" tmprivval "github.com/tendermint/tendermint/proto/tendermint/privval" ) @@ -80,11 +80,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. 
- recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index b48e79f94..5cf4f7be7 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -3,10 +3,10 @@ package privval import ( "fmt" "net" + "sync" "time" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -20,7 +20,7 @@ type signerEndpoint struct { service.BaseService logger log.Logger - connMtx tmsync.Mutex + connMtx sync.Mutex conn net.Conn timeoutReadWrite time.Duration diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 15622925d..ff2c0b7c2 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -4,9 +4,9 @@ import ( "context" "fmt" "net" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -39,7 +39,7 @@ type SignerListenerEndpoint struct { pingTimer *time.Ticker pingInterval time.Duration - instanceMtx tmsync.Mutex // Ensures instance public methods access, i.e. SendRequest + instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest } // NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. 
diff --git a/privval/signer_server.go b/privval/signer_server.go index 4c4d6282a..e1235d5f3 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -3,8 +3,8 @@ package privval import ( "context" "io" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -24,7 +24,7 @@ type SignerServer struct { chainID string privVal types.PrivValidator - handlerMtx tmsync.Mutex + handlerMtx sync.Mutex validationRequestHandler ValidationRequestHandlerFunc } diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index dda8e4f46..320540450 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "strings" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/pubsub" rpcclient "github.com/tendermint/tendermint/rpc/client" @@ -48,7 +48,7 @@ type wsEvents struct { *rpcclient.RunState ws *jsonrpcclient.WSClient - mtx tmsync.RWMutex + mtx sync.RWMutex subscriptions map[string]*wsSubscription } diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 03fc19be4..45f516f04 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -11,9 +11,9 @@ import ( "net/http" "net/url" "strings" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -130,7 +130,7 @@ type Client struct { client *http.Client - mtx tmsync.Mutex + mtx sync.Mutex nextReqID int } @@ -304,7 +304,7 @@ type jsonRPCBufferedRequest struct { type RequestBatch struct { client *Client - mtx tmsync.Mutex + mtx sync.Mutex requests []*jsonRPCBufferedRequest } diff --git a/rpc/jsonrpc/client/ws_client.go 
b/rpc/jsonrpc/client/ws_client.go index 51891712f..98cff05ce 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -13,7 +13,6 @@ import ( "github.com/gorilla/websocket" metrics "github.com/rcrowley/go-metrics" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmclient "github.com/tendermint/tendermint/rpc/client" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -70,7 +69,7 @@ type WSClient struct { // nolint: maligned wg sync.WaitGroup - mtx tmsync.RWMutex + mtx sync.RWMutex sentLastPingAt time.Time reconnecting bool nextReqID int diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index d1d6c1fed..9cc65a758 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "runtime" + "sync" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -22,7 +22,7 @@ var wsCallTimeout = 5 * time.Second type myHandler struct { closeConnAfterRead bool - mtx tmsync.RWMutex + mtx sync.RWMutex } var upgrader = websocket.Upgrader{ diff --git a/types/block.go b/types/block.go index 2f444be74..89054e100 100644 --- a/types/block.go +++ b/types/block.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "strings" + "sync" "time" "github.com/gogo/protobuf/proto" @@ -13,7 +14,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" @@ -40,7 +40,7 @@ const ( // Block defines the 
atomic unit of a Tendermint blockchain. type Block struct { - mtx tmsync.Mutex + mtx sync.Mutex Header `json:"header"` Data `json:"data"` diff --git a/types/part_set.go b/types/part_set.go index 3a691083f..9699f2b32 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" + "sync" "github.com/tendermint/tendermint/crypto/merkle" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmjson "github.com/tendermint/tendermint/libs/json" @@ -151,7 +151,7 @@ type PartSet struct { total uint32 hash []byte - mtx tmsync.Mutex + mtx sync.Mutex parts []*Part partsBitArray *bits.BitArray count uint32 diff --git a/types/vote_set.go b/types/vote_set.go index e014ae7bb..46e6d270d 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -4,8 +4,8 @@ import ( "bytes" "fmt" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmjson "github.com/tendermint/tendermint/libs/json" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -65,7 +65,7 @@ type VoteSet struct { signedMsgType tmproto.SignedMsgType valSet *ValidatorSet - mtx tmsync.Mutex + mtx sync.Mutex votesBitArray *bits.BitArray votes []*Vote // Primary votes to share sum int64 // Sum of voting power for seen votes, discounting conflicts From a872dd75b7d77ccba02a885f77bd4fc123200bd4 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 14 Dec 2021 00:27:11 -0800 Subject: [PATCH 25/33] Fix broken documentation link. (#7439) A follow-up to #7416 and #7412. --- README.md | 2 +- docs/tools/docker-compose.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 12ac9084b..14af3456e 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ See the [install instructions](/docs/introduction/install.md). 
### Quick Start - [Single node](/docs/introduction/quick-start.md) -- [Local cluster using docker-compose](/docs/networks/docker-compose.md) +- [Local cluster using docker-compose](/docs/tools/docker-compose.md) - [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md) - [Join the Cosmos testnet](https://cosmos.network/testnet) diff --git a/docs/tools/docker-compose.md b/docs/tools/docker-compose.md index b1592ed09..914f32bde 100644 --- a/docs/tools/docker-compose.md +++ b/docs/tools/docker-compose.md @@ -169,7 +169,7 @@ Override the [command](https://github.com/tendermint/tendermint/blob/master/netw ipv4_address: 192.167.10.2 ``` -Similarly do for node1, node2 and node3 then [run testnet](https://github.com/tendermint/tendermint/blob/master/docs/networks/docker-compose.md#run-a-testnet) +Similarly do for node1, node2 and node3 then [run testnet](#run-a-testnet). ## Logging From 2ff962a63a5c7c93f4c4a35c4f8e463f999443a6 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Dec 2021 12:45:13 -0500 Subject: [PATCH 26/33] log: dissallow nil loggers (#7445) --- abci/client/creators.go | 4 ++-- abci/client/local_client.go | 5 +++-- internal/blocksync/pool.go | 6 +++--- internal/consensus/byzantine_test.go | 4 ++-- internal/consensus/common_test.go | 4 ++-- internal/consensus/reactor_test.go | 4 ++-- internal/consensus/state.go | 2 +- internal/statesync/stateprovider.go | 4 +++- libs/events/event_cache_test.go | 3 ++- libs/events/events.go | 5 +++-- libs/events/events_test.go | 17 +++++++++-------- libs/service/service.go | 4 ---- libs/service/service_test.go | 3 ++- light/proxy/proxy.go | 2 +- light/rpc/client.go | 5 +++-- 15 files changed, 38 insertions(+), 34 deletions(-) diff --git a/abci/client/creators.go b/abci/client/creators.go index c7220e928..a1b65f5fe 100644 --- a/abci/client/creators.go +++ b/abci/client/creators.go @@ -16,8 +16,8 @@ type Creator func(log.Logger) (Client, error) func NewLocalCreator(app types.Application) 
Creator { mtx := new(sync.Mutex) - return func(_ log.Logger) (Client, error) { - return NewLocalClient(mtx, app), nil + return func(logger log.Logger) (Client, error) { + return NewLocalClient(logger, mtx, app), nil } } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 8f2fab4e7..86e0e1d4c 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -5,6 +5,7 @@ import ( "sync" types "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -26,7 +27,7 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. -func NewLocalClient(mtx *sync.Mutex, app types.Application) Client { +func NewLocalClient(logger log.Logger, mtx *sync.Mutex, app types.Application) Client { if mtx == nil { mtx = new(sync.Mutex) } @@ -34,7 +35,7 @@ func NewLocalClient(mtx *sync.Mutex, app types.Application) Client { mtx: mtx, Application: app, } - cli.BaseService = *service.NewBaseService(nil, "localClient", cli) + cli.BaseService = *service.NewBaseService(logger, "localClient", cli) return cli } diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index a06c841fc..88d9acef6 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -418,7 +418,7 @@ func (pool *BlockPool) makeNextRequester(ctx context.Context) { return } - request := newBPRequester(pool, nextHeight) + request := newBPRequester(pool.logger, pool, nextHeight) pool.requesters[nextHeight] = request atomic.AddInt32(&pool.numPending, 1) @@ -565,7 +565,7 @@ type bpRequester struct { block *types.Block } -func newBPRequester(pool *BlockPool, height int64) *bpRequester { +func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { bpr := &bpRequester{ logger: pool.logger, pool: pool, @@ -576,7 +576,7 @@ func newBPRequester(pool *BlockPool, height int64) 
*bpRequester { peerID: "", block: nil, } - bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr) + bpr.BaseService = *service.NewBaseService(logger, "bpRequester", bpr) return bpr } diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index a14af999b..e80b79c84 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -68,8 +68,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // one for mempool, one for consensus mtx := new(sync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) // Make Mempool mempool := mempool.NewTxMempool( diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index b8548cdc8..0a61c95a3 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -440,8 +440,8 @@ func newStateWithConfigAndBlockStore( ) *State { // one for mempool, one for consensus mtx := new(sync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) // Make Mempool diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index ff218cb5f..8b7b7eb51 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -392,8 +392,8 @@ func TestReactorWithEvidence(t *testing.T) { // one for mempool, one for consensus mtx := new(sync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) mempool := 
mempool.NewTxMempool( log.TestingLogger().With("module", "mempool"), diff --git a/internal/consensus/state.go b/internal/consensus/state.go index f45088352..e87d058eb 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -178,7 +178,7 @@ func NewState( doWALCatchup: true, wal: nilWAL{}, evpool: evpool, - evsw: tmevents.NewEventSwitch(), + evsw: tmevents.NewEventSwitch(logger), metrics: NopMetrics(), onStopCh: make(chan *cstypes.RoundState), } diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b798eb9ad..dc54ee3e2 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -44,6 +44,7 @@ type stateProviderRPC struct { lc *light.Client initialHeight int64 providers map[lightprovider.Provider]string + logger log.Logger } // NewRPCStateProvider creates a new StateProvider using a light client and RPC clients. @@ -79,6 +80,7 @@ func NewRPCStateProvider( return nil, err } return &stateProviderRPC{ + logger: logger, lc: lc, initialHeight: initialHeight, providers: providerRemotes, @@ -176,7 +178,7 @@ func (s *stateProviderRPC) State(ctx context.Context, height uint64) (sm.State, if err != nil { return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err) } - rpcclient := lightrpc.NewClient(primaryRPC, s.lc) + rpcclient := lightrpc.NewClient(s.logger, primaryRPC, s.lc) result, err := rpcclient.ConsensusParams(ctx, ¤tLightBlock.Height) if err != nil { return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w", diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index 13ab341f6..358e53a7b 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -6,13 +6,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) func TestEventCache_Flush(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) 
defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) err := evsw.Start(ctx) require.NoError(t, err) diff --git a/libs/events/events.go b/libs/events/events.go index b5b6f76df..f97dfb1a1 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -61,12 +62,12 @@ type eventSwitch struct { listeners map[string]*eventListener } -func NewEventSwitch() EventSwitch { +func NewEventSwitch(logger log.Logger) EventSwitch { evsw := &eventSwitch{ eventCells: make(map[string]*eventCell), listeners: make(map[string]*eventListener), } - evsw.BaseService = *service.NewBaseService(nil, "EventSwitch", evsw) + evsw.BaseService = *service.NewBaseService(logger, "EventSwitch", evsw) return evsw } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index db9385ec3..5ddf8d93d 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/rand" ) @@ -18,7 +19,7 @@ func TestAddListenerForEventFireOnce(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -47,7 +48,7 @@ func TestAddListenerForEventFireMany(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -83,7 +84,7 @@ func TestAddListenerForDifferentEvents(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) 
require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -145,7 +146,7 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -235,7 +236,7 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -284,7 +285,7 @@ func TestAddAndRemoveListener(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -340,7 +341,7 @@ func TestAddAndRemoveListener(t *testing.T) { func TestRemoveListener(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) @@ -397,7 +398,7 @@ func TestRemoveListener(t *testing.T) { func TestRemoveListenersAsync(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - evsw := NewEventSwitch() + evsw := NewEventSwitch(log.TestingLogger()) require.NoError(t, evsw.Start(ctx)) t.Cleanup(evsw.Wait) diff --git a/libs/service/service.go b/libs/service/service.go index 81c885ae5..d2594c3e0 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -104,10 +104,6 @@ type BaseService struct { // NewBaseService creates a new BaseService. 
func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { - if logger == nil { - logger = log.NewNopLogger() - } - return &BaseService{ logger: logger, name: name, diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 9630d358b..254491cae 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) type testService struct { @@ -22,7 +23,7 @@ func TestBaseServiceWait(t *testing.T) { defer cancel() ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.BaseService = *NewBaseService(log.TestingLogger(), "TestService", ts) err := ts.Start(ctx) require.NoError(t, err) diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 9119d2f2a..60d542b7c 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -40,7 +40,7 @@ func NewProxy( return &Proxy{ Addr: listenAddr, Config: config, - Client: lrpc.NewClient(rpcClient, lightClient, opts...), + Client: lrpc.NewClient(logger, rpcClient, lightClient, opts...), Logger: logger, }, nil } diff --git a/light/rpc/client.go b/light/rpc/client.go index 7496d60a1..08ef27a6d 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -13,6 +13,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" @@ -88,14 +89,14 @@ func DefaultMerkleKeyPathFn() KeyPathFunc { } // NewClient returns a new client. 
-func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { +func NewClient(logger log.Logger, next rpcclient.Client, lc LightClient, opts ...Option) *Client { c := &Client{ next: next, lc: lc, prt: merkle.DefaultProofRuntime(), quitCh: make(chan struct{}), } - c.BaseService = *service.NewBaseService(nil, "Client", c) + c.BaseService = *service.NewBaseService(logger, "Client", c) for _, o := range opts { o(c) } From e3aaae570dbec3841f0b55a8260abc10f2fa5409 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Dec 2021 14:56:28 -0500 Subject: [PATCH 27/33] node: minor package cleanups (#7444) --- node/node.go | 91 +++++++++++++++++++++++------------------------ node/node_test.go | 6 ++-- node/setup.go | 87 +++++++++++++------------------------------- 3 files changed, 74 insertions(+), 110 deletions(-) diff --git a/node/node.go b/node/node.go index 6ad02fbee..9878f60e5 100644 --- a/node/node.go +++ b/node/node.go @@ -171,17 +171,17 @@ func makeNode( nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp, err := createAndStartProxyAppConns(ctx, clientCreator, logger, nodeMetrics.proxy) - if err != nil { - return nil, combineCloseError(err, makeCloser(closers)) + proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), nodeMetrics.proxy) + if err := proxyApp.Start(ctx); err != nil { + return nil, fmt.Errorf("error starting proxy app connections: %v", err) } // EventBus and IndexerService must be started before the handshake because // we might need to index the txs of the replayed block as this might not have happened // when the node stopped last time (i.e. 
the node stopped after it saved the block // but before it indexed the txs, or, endblocker panicked) - eventBus, err := createAndStartEventBus(ctx, logger) - if err != nil { + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { return nil, combineCloseError(err, makeCloser(closers)) } @@ -556,8 +556,7 @@ func (n *nodeImpl) OnStart(ctx context.Context) error { n.rpcListeners = listeners } - if n.config.Instrumentation.Prometheus && - n.config.Instrumentation.PrometheusListenAddr != "" { + if n.config.Instrumentation.Prometheus && n.config.Instrumentation.PrometheusListenAddr != "" { n.prometheusSrv = n.startPrometheusServer(ctx, n.config.Instrumentation.PrometheusListenAddr) } @@ -623,50 +622,50 @@ func (n *nodeImpl) OnStart(ctx context.Context) error { n.logger.Error("failed to emit the statesync start event", "err", err) } - // FIXME: We shouldn't allow state sync to silently error out without - // bubbling up the error and gracefully shutting down the rest of the node - go func() { - n.logger.Info("starting state sync") - state, err := n.stateSyncReactor.Sync(ctx) - if err != nil { - n.logger.Error("state sync failed; shutting down this node", "err", err) - // stop the node - if err := n.Stop(); err != nil { - n.logger.Error("failed to shut down node", "err", err) - } - return + // RUN STATE SYNC NOW: + // + // TODO: Eventually this should run as part of some + // separate orchestrator + n.logger.Info("starting state sync") + ssState, err := n.stateSyncReactor.Sync(ctx) + if err != nil { + n.logger.Error("state sync failed; shutting down this node", "err", err) + // stop the node + if err := n.Stop(); err != nil { + n.logger.Error("failed to shut down node", "err", err) } + return err + } - n.consensusReactor.SetStateSyncingMetrics(0) + n.consensusReactor.SetStateSyncingMetrics(0) - if err := n.eventBus.PublishEventStateSyncStatus(ctx, - types.EventDataStateSyncStatus{ - Complete: true, - Height: 
state.LastBlockHeight, - }); err != nil { + if err := n.eventBus.PublishEventStateSyncStatus(ctx, + types.EventDataStateSyncStatus{ + Complete: true, + Height: ssState.LastBlockHeight, + }); err != nil { + n.logger.Error("failed to emit the statesync start event", "err", err) + return err + } - n.logger.Error("failed to emit the statesync start event", "err", err) - } + // TODO: Some form of orchestrator is needed here between the state + // advancing reactors to be able to control which one of the three + // is running + // FIXME Very ugly to have these metrics bleed through here. + n.consensusReactor.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(ctx, ssState); err != nil { + n.logger.Error("failed to switch to block sync", "err", err) + return err + } - // TODO: Some form of orchestrator is needed here between the state - // advancing reactors to be able to control which one of the three - // is running - // FIXME Very ugly to have these metrics bleed through here. - n.consensusReactor.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(ctx, state); err != nil { - n.logger.Error("failed to switch to block sync", "err", err) - return - } - - if err := n.eventBus.PublishEventBlockSyncStatus(ctx, - types.EventDataBlockSyncStatus{ - Complete: false, - Height: state.LastBlockHeight, - }); err != nil { - - n.logger.Error("failed to emit the block sync starting event", "err", err) - } - }() + if err := n.eventBus.PublishEventBlockSyncStatus(ctx, + types.EventDataBlockSyncStatus{ + Complete: false, + Height: ssState.LastBlockHeight, + }); err != nil { + n.logger.Error("failed to emit the block sync starting event", "err", err) + return err + } } return nil diff --git a/node/node_test.go b/node/node_test.go index d9806c9f1..666192a5c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -20,6 +20,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" 
"github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" @@ -562,8 +563,9 @@ func TestNodeSetEventSink(t *testing.T) { logger := log.TestingLogger() setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { - eventBus, err := createAndStartEventBus(ctx, logger) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + t.Cleanup(eventBus.Wait) genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) diff --git a/node/setup.go b/node/setup.go index 910eefad6..5e626a117 100644 --- a/node/setup.go +++ b/node/setup.go @@ -10,7 +10,6 @@ import ( dbm "github.com/tendermint/tm-db" - abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" @@ -93,29 +92,6 @@ func initDBs( return blockStore, stateDB, makeCloser(closers), nil } -func createAndStartProxyAppConns( - ctx context.Context, - clientCreator abciclient.Creator, - logger log.Logger, - metrics *proxy.Metrics, -) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), metrics) - - if err := proxyApp.Start(ctx); err != nil { - return nil, fmt.Errorf("error starting proxy app connections: %v", err) - } - - return proxyApp, nil -} - -func createAndStartEventBus(ctx context.Context, logger log.Logger) (*eventbus.EventBus, error) { - eventBus := eventbus.NewDefault(logger.With("module", "events")) - if err := eventBus.Start(ctx); err != nil { - return nil, err - } - return eventBus, nil -} - func createAndStartIndexerService( ctx context.Context, cfg *config.Config, @@ -368,21 +344,6 @@ func createConsensusReactor( return 
reactor, consensusState, nil } -func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { - conf := conn.DefaultMConnConfig() - conf.FlushThrottle = cfg.P2P.FlushThrottleTimeout - conf.SendRate = cfg.P2P.SendRate - conf.RecvRate = cfg.P2P.RecvRate - conf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize - - return p2p.NewMConnTransport( - logger, conf, []*p2p.ChannelDescriptor{}, - p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), - }, - ) -} - func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, @@ -459,14 +420,25 @@ func createRouter( nodeInfo types.NodeInfo, nodeKey types.NodeKey, peerManager *p2p.PeerManager, - conf *config.Config, + cfg *config.Config, proxyApp proxy.AppConns, ) (*p2p.Router, error) { p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, conf) - ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(conf.P2P.ListenAddress)) + transportConf := conn.DefaultMConnConfig() + transportConf.FlushThrottle = cfg.P2P.FlushThrottleTimeout + transportConf.SendRate = cfg.P2P.SendRate + transportConf.RecvRate = cfg.P2P.RecvRate + transportConf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize + transport := p2p.NewMConnTransport( + p2pLogger, transportConf, []*p2p.ChannelDescriptor{}, + p2p.MConnTransportOptions{ + MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), + }, + ) + + ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) if err != nil { return nil, err } @@ -480,7 +452,7 @@ func createRouter( peerManager, []p2p.Transport{transport}, []p2p.Endpoint{ep}, - getRouterConfig(conf, proxyApp), + getRouterConfig(cfg, proxyApp), ) } @@ -506,14 +478,13 @@ func makeNodeInfo( genDoc *types.GenesisDoc, state sm.State, ) (types.NodeInfo, error) { + txIndexerStatus := "off" if indexer.IndexingEnabled(eventSinks) { txIndexerStatus = "on" } - bcChannel := byte(blocksync.BlockSyncChannel) - nodeInfo := types.NodeInfo{ 
ProtocolVersion: types.ProtocolVersion{ P2P: version.P2PProtocol, // global @@ -524,7 +495,7 @@ func makeNodeInfo( Network: genDoc.ChainID, Version: version.TMVersion, Channels: []byte{ - bcChannel, + byte(blocksync.BlockSyncChannel), byte(consensus.StateChannel), byte(consensus.DataChannel), byte(consensus.VoteChannel), @@ -547,16 +518,12 @@ func makeNodeInfo( nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := cfg.P2P.ExternalAddress - - if lAddr == "" { - lAddr = cfg.P2P.ListenAddress + nodeInfo.ListenAddr = cfg.P2P.ExternalAddress + if nodeInfo.ListenAddr == "" { + nodeInfo.ListenAddr = cfg.P2P.ListenAddress } - nodeInfo.ListenAddr = lAddr - - err := nodeInfo.Validate() - return nodeInfo, err + return nodeInfo, nodeInfo.Validate() } func makeSeedNodeInfo( @@ -586,14 +553,10 @@ func makeSeedNodeInfo( nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := cfg.P2P.ExternalAddress - - if lAddr == "" { - lAddr = cfg.P2P.ListenAddress + nodeInfo.ListenAddr = cfg.P2P.ExternalAddress + if nodeInfo.ListenAddr == "" { + nodeInfo.ListenAddr = cfg.P2P.ListenAddress } - nodeInfo.ListenAddr = lAddr - - err := nodeInfo.Validate() - return nodeInfo, err + return nodeInfo, nodeInfo.Validate() } From f56df58fe8a8615c75214ebe3424be3764651461 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 14 Dec 2021 16:30:06 -0500 Subject: [PATCH 28/33] testing,log: add testing.T logger connector (#7447) --- libs/log/testing.go | 56 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/libs/log/testing.go b/libs/log/testing.go index 9894f6a50..b76bb77c6 100644 --- a/libs/log/testing.go +++ b/libs/log/testing.go @@ -1,10 +1,10 @@ package log import ( - "io" - "os" "sync" "testing" + + "github.com/rs/zerolog" ) var ( @@ -19,11 +19,11 @@ var ( // NOTE: // - A call to NewTestingLogger() must be made inside a test (not in the init func) // because verbose flag only set at the time of testing. 
+// - Repeated calls to this function within a single process will +// produce a single test log instance, and while the logger is safe +// for parallel use it it doesn't produce meaningful feedback for +// parallel tests. func TestingLogger() Logger { - return TestingLoggerWithOutput(os.Stdout) -} - -func TestingLoggerWithOutput(w io.Writer) Logger { testingLoggerMtx.Lock() defer testingLoggerMtx.Unlock() @@ -39,3 +39,47 @@ func TestingLoggerWithOutput(w io.Writer) Logger { return testingLogger } + +type testingWriter struct { + t testing.TB +} + +func (tw testingWriter) Write(in []byte) (int, error) { + tw.t.Log(string(in)) + return len(in), nil +} + +// NewTestingLogger converts a testing.T into a logging interface to +// make test failures and verbose provide better feedback associated +// with test failures. This logging instance is safe for use from +// multiple threads, but in general you should create one of these +// loggers ONCE for each *testing.T instance that you interact with. +// +// By default it collects only ERROR messages, or DEBUG messages in +// verbose mode, and relies on the underlying behavior of testing.T.Log() +func NewTestingLogger(t testing.TB) Logger { + level := LogLevelError + if testing.Verbose() { + level = LogLevelDebug + } + + return NewTestingLoggerWithLevel(t, level) +} + +// NewTestingLoggerWithLevel creates a testing logger instance at a +// specific level that wraps the behavior of testing.T.Log(). +func NewTestingLoggerWithLevel(t testing.TB, level string) Logger { + logLevel, err := zerolog.ParseLevel(level) + if err != nil { + t.Fatalf("failed to parse log level (%s): %v", level, err) + } + trace := false + if testing.Verbose() { + trace = true + } + + return defaultLogger{ + Logger: zerolog.New(newSyncWriter(testingWriter{t})).Level(logLevel), + trace: trace, + } +} From da697089d08b63355b7888bccffbe8a8c5ebcb0e Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 14 Dec 2021 14:05:42 -0800 Subject: [PATCH 29/33] Move libs/async to internal/libs/async. (#7449) --- CHANGELOG_PENDING.md | 1 + {libs => internal/libs}/async/async.go | 0 {libs => internal/libs}/async/async_test.go | 0 internal/p2p/conn/secret_connection.go | 2 +- internal/p2p/conn/secret_connection_test.go | 2 +- privval/secret_connection.go | 2 +- test/fuzz/p2p/secretconnection/read_write.go | 2 +- 7 files changed, 5 insertions(+), 4 deletions(-) rename {libs => internal/libs}/async/async.go (100%) rename {libs => internal/libs}/async/async_test.go (100%) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 503931dd8..4b3c53a4c 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,6 +27,7 @@ Special thanks to external contributors on this release: - Go API + - [libs/async] \#7449 Move library to internal. (@creachadair) - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair) - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair) - [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. 
(@tychoish) diff --git a/libs/async/async.go b/internal/libs/async/async.go similarity index 100% rename from libs/async/async.go rename to internal/libs/async/async.go diff --git a/libs/async/async_test.go b/internal/libs/async/async_test.go similarity index 100% rename from libs/async/async_test.go rename to internal/libs/async/async_test.go diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index f67c89e5b..ad51237e4 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -25,8 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - "github.com/tendermint/tendermint/libs/async" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 08a7925fa..6e8db9be1 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -21,7 +21,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/sr25519" - "github.com/tendermint/tendermint/libs/async" + "github.com/tendermint/tendermint/internal/libs/async" tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/privval/secret_connection.go b/privval/secret_connection.go index 9192c3114..2bc927109 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -25,8 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - 
"github.com/tendermint/tendermint/libs/async" tmprivval "github.com/tendermint/tendermint/proto/tendermint/privval" ) diff --git a/test/fuzz/p2p/secretconnection/read_write.go b/test/fuzz/p2p/secretconnection/read_write.go index 9701460f5..87d547e55 100644 --- a/test/fuzz/p2p/secretconnection/read_write.go +++ b/test/fuzz/p2p/secretconnection/read_write.go @@ -7,8 +7,8 @@ import ( "log" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/internal/libs/async" sc "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/async" ) func Fuzz(data []byte) int { From ab7da86b06233b325406d8b0fb5ff0902adec001 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 14 Dec 2021 15:14:30 -0800 Subject: [PATCH 30/33] Internalize libs/sync. (#7450) Inline the one usage of this library, and remove the lib. --- CHANGELOG_PENDING.md | 1 + internal/blocksync/reactor.go | 28 +++++++++++++++++++++++++--- libs/sync/atomic_bool.go | 33 --------------------------------- libs/sync/atomic_bool_test.go | 27 --------------------------- 4 files changed, 26 insertions(+), 63 deletions(-) delete mode 100644 libs/sync/atomic_bool.go delete mode 100644 libs/sync/atomic_bool_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 4b3c53a4c..3e54a230b 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,6 +27,7 @@ Special thanks to external contributors on this release: - Go API + - [libs/sync] \#7450 Internalize and remove the library. (@creachadair) - [libs/async] \#7449 Move library to internal. (@creachadair) - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair) - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. 
(@creachadair) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 7ff785c81..ee745fb92 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -6,6 +6,7 @@ import ( "fmt" "runtime/debug" "sync" + "sync/atomic" "time" "github.com/tendermint/tendermint/internal/consensus" @@ -14,7 +15,6 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/types" ) @@ -75,7 +75,7 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmsync.AtomicBool + blockSync *atomicBool blockSyncCh *p2p.Channel // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope @@ -132,7 +132,7 @@ func NewReactor( store: store, pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh), consReactor: consReactor, - blockSync: tmsync.NewBool(blockSync), + blockSync: newAtomicBool(blockSync), requestsCh: requestsCh, errorsCh: errorsCh, blockSyncCh: blockSyncCh, @@ -625,3 +625,25 @@ func (r *Reactor) GetRemainingSyncTime() time.Duration { return time.Duration(int64(remain * float64(time.Second))) } + +// atomicBool is an atomic Boolean, safe for concurrent use by multiple +// goroutines. +type atomicBool int32 + +// newAtomicBool creates an atomicBool with given initial value. +func newAtomicBool(ok bool) *atomicBool { + ab := new(atomicBool) + if ok { + ab.Set() + } + return ab +} + +// Set sets the Boolean to true. +func (ab *atomicBool) Set() { atomic.StoreInt32((*int32)(ab), 1) } + +// UnSet sets the Boolean to false. +func (ab *atomicBool) UnSet() { atomic.StoreInt32((*int32)(ab), 0) } + +// IsSet returns whether the Boolean is true. 
+func (ab *atomicBool) IsSet() bool { return atomic.LoadInt32((*int32)(ab))&1 == 1 } diff --git a/libs/sync/atomic_bool.go b/libs/sync/atomic_bool.go deleted file mode 100644 index 1a530b596..000000000 --- a/libs/sync/atomic_bool.go +++ /dev/null @@ -1,33 +0,0 @@ -package sync - -import "sync/atomic" - -// AtomicBool is an atomic Boolean. -// Its methods are all atomic, thus safe to be called by multiple goroutines simultaneously. -// Note: When embedding into a struct one should always use *AtomicBool to avoid copy. -// it's a simple implmentation from https://github.com/tevino/abool -type AtomicBool int32 - -// NewBool creates an AtomicBool with given default value. -func NewBool(ok bool) *AtomicBool { - ab := new(AtomicBool) - if ok { - ab.Set() - } - return ab -} - -// Set sets the Boolean to true. -func (ab *AtomicBool) Set() { - atomic.StoreInt32((*int32)(ab), 1) -} - -// UnSet sets the Boolean to false. -func (ab *AtomicBool) UnSet() { - atomic.StoreInt32((*int32)(ab), 0) -} - -// IsSet returns whether the Boolean is true. 
-func (ab *AtomicBool) IsSet() bool { - return atomic.LoadInt32((*int32)(ab))&1 == 1 -} diff --git a/libs/sync/atomic_bool_test.go b/libs/sync/atomic_bool_test.go deleted file mode 100644 index 9531815e8..000000000 --- a/libs/sync/atomic_bool_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDefaultValue(t *testing.T) { - t.Parallel() - v := NewBool(false) - assert.False(t, v.IsSet()) - - v = NewBool(true) - assert.True(t, v.IsSet()) -} - -func TestSetUnSet(t *testing.T) { - t.Parallel() - v := NewBool(false) - - v.Set() - assert.True(t, v.IsSet()) - - v.UnSet() - assert.False(t, v.IsSet()) -} From f3278e8b68dcd8aaa1d6aed6d5eafaf3051baadb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Dec 2021 13:54:42 +0000 Subject: [PATCH 31/33] build(deps): Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 (#7456) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.2.1 to 1.3.0.
Release notes

Sourced from github.com/spf13/cobra's releases.

v1.3.0 - The Fall 2021 release 🍁

Completion fixes & enhancements 💇🏼

In v1.2.0, we introduced a new model for completions. Thanks to everyone for trying it, giving feedback, and providing numerous fixes! Continue to work with the new model as the old one (as noted in code comments) will be deprecated in a coming release.

  • DisableFlagParsing now triggers custom completions for flag names #1161
  • Fixed unbound variables in bash completions causing edge case errors #1321
  • help completion formatting improvements & fixes #1444
  • All completions now follow the help example: short desc are now capitalized and removes extra spacing from long description #1455
  • Typo fixes in bash & zsh completions #1459
  • Fixed mixed tab/spaces indentation in completion scripts. Now just 4 spaces #1473
  • Support for different bash completion options. Bash completions v2 supports descriptions and requires descriptions to be removed for menu-complete, menu-complete-backward and insert-completions. These descriptions are now purposefully removed in support of this model. #1509
  • Fix for invalid shell completions when using ~/.cobra.yaml. Log message Using config file: ~/.cobra.yaml now printed to stderr #1510
  • Removes unnecessary trailing spaces from completion command descriptions #1520
  • Option to hid default completion command #1541
  • Remove __complete command for programs without subcommands #1563

Generator changes ⚙️

Thanks to @​spf13 for providing a number of changes to the Cobra generator tool, streamlining it for new users!

  • The Cobra generator now won't automatically include Viper and cleans up a number of unused imports when not using Viper.
  • The Cobra generator's default license is now none
  • The Cobra generator now works with Go modules
  • Documentation to reflect these changes

New Features ⭐

  • License can be specified by their SPDX identifiers #1159
  • MatchAll allows combining several PositionalArgs to work in concert. This now allows for enabling composing PositionalArgs #896

Bug Fixes 🐛

  • Fixed multiple error message from cobra init boilerplates #1463 #1552 #1557

Testing 👀

  • Now testing golang 1.16.x and 1.17.x in CI #1425
  • Fix for running diff test to ignore CR for windows #949
  • Added helper functions and reduced code reproduction in args_test #1426
  • Now using official golangci-lint github action #1477

Security 🔏

  • Added GitHub dependabot #1427
  • Now using Viper v1.10.0
    • There is a known CVE in an indirect dependency from viper: spf13/cobra#1538. This will be patched in a future release

Documentation 📝

  • Multiple projects added to the projects_using_cobra.md file: #1377 #1501 #1454
  • Removed ToC from main readme file as it is now automagically displayed by GitHub #1429
  • Documentation correct for when the --author flag is specified #1009
  • shell_completions.md has an easier to use snippet for copying and pasting shell completions #1372

... (truncated)

Commits
  • 178edbb Bump github.com/spf13/viper from 1.9.0 to 1.10.0 (#1561)
  • 9054739 Remove __complete cmd for program without subcmds (#1563)
  • 19c9c74 Always include the os package import when generating the root command (#1557)
  • 01e05b8 Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#1554)
  • 36bff0a fix root.go.golden (#1552)
  • 1854bb5 Fix some typos (mostly found by codespell) (#1514)
  • ff2c55e chore(ci): use golangci-lint-action (#1477)
  • 1beb476 fix: Duplicate error message from cobra init boilerplates (#1463)
  • 6f84ef4 Provide option to hide default 'completion' cmd (#1541)
  • ee75a2b Remove trailing spaces from bash completion command description (#1520)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.2.1&new-version=1.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 +--- go.sum | 9 ++++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 2e222e9fe..df894ee4f 100644 --- a/go.mod +++ b/go.mod @@ -26,9 +26,8 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.8.0 github.com/rs/zerolog v1.26.0 - github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.2.1 + github.com/spf13/cobra v1.3.0 github.com/spf13/viper v1.10.0 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.6 @@ -150,7 +149,6 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.3 // indirect github.com/pelletier/go-toml v1.9.4 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/go.sum b/go.sum index 95957ff3e..8c303dca0 100644 --- a/go.sum +++ b/go.sum @@ -216,6 +216,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -813,8 +814,6 @@ github.com/pelletier/go-toml v1.9.4 
h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhEC github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -889,6 +888,7 @@ github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE= github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= @@ -898,8 +898,6 @@ github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYI github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 
v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/securego/gosec/v2 v2.9.1 h1:anHKLS/ApTYU6NZkKa/5cQqqcbKZURjvc+MtR++S4EQ= @@ -943,8 +941,9 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= From 82738eb0166fc24606dab4336c5fa5b4d340c421 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 15 Dec 2021 07:09:32 -0800 Subject: [PATCH 32/33] Move the libs/pubsub package to internal scope (#7451) No API changes, merely changes the import path. 
--- CHANGELOG_PENDING.md | 1 + internal/consensus/common_test.go | 2 +- internal/consensus/reactor_test.go | 2 +- internal/consensus/replay_file.go | 2 +- internal/consensus/replay_test.go | 2 +- internal/consensus/state_test.go | 2 +- internal/eventbus/event_bus.go | 2 +- internal/eventbus/event_bus_test.go | 4 ++-- internal/inspect/inspect_test.go | 2 +- internal/inspect/rpc/rpc.go | 2 +- {libs => internal}/pubsub/example_test.go | 4 ++-- {libs => internal}/pubsub/pubsub.go | 2 +- {libs => internal}/pubsub/pubsub_test.go | 4 ++-- {libs => internal}/pubsub/query/bench_test.go | 2 +- {libs => internal}/pubsub/query/query.go | 2 +- {libs => internal}/pubsub/query/query_test.go | 6 +++--- {libs => internal}/pubsub/query/syntax/doc.go | 0 {libs => internal}/pubsub/query/syntax/parser.go | 0 {libs => internal}/pubsub/query/syntax/scanner.go | 0 {libs => internal}/pubsub/query/syntax/syntax_test.go | 2 +- {libs => internal}/pubsub/subindex.go | 0 {libs => internal}/pubsub/subscription.go | 0 internal/rpc/core/blocks.go | 2 +- internal/rpc/core/events.go | 4 ++-- internal/rpc/core/tx.go | 2 +- internal/state/execution_test.go | 2 +- internal/state/indexer/block/kv/kv.go | 4 ++-- internal/state/indexer/block/kv/kv_test.go | 2 +- internal/state/indexer/block/kv/util.go | 2 +- internal/state/indexer/block/null/null.go | 2 +- internal/state/indexer/eventsink.go | 2 +- internal/state/indexer/indexer.go | 2 +- internal/state/indexer/indexer_service.go | 2 +- internal/state/indexer/mocks/event_sink.go | 2 +- internal/state/indexer/query_range.go | 2 +- internal/state/indexer/sink/kv/kv.go | 2 +- internal/state/indexer/sink/kv/kv_test.go | 2 +- internal/state/indexer/sink/null/null.go | 2 +- internal/state/indexer/sink/psql/psql.go | 2 +- internal/state/indexer/tx/kv/kv.go | 4 ++-- internal/state/indexer/tx/kv/kv_bench_test.go | 2 +- internal/state/indexer/tx/kv/kv_test.go | 2 +- internal/state/indexer/tx/null/null.go | 2 +- internal/state/mocks/event_sink.go | 2 +- 
light/proxy/proxy.go | 2 +- node/node.go | 2 +- node/node_test.go | 2 +- rpc/client/http/ws.go | 2 +- rpc/client/local/local.go | 4 ++-- types/events.go | 4 ++-- 50 files changed, 55 insertions(+), 54 deletions(-) rename {libs => internal}/pubsub/example_test.go (87%) rename {libs => internal}/pubsub/pubsub.go (99%) rename {libs => internal}/pubsub/pubsub_test.go (99%) rename {libs => internal}/pubsub/query/bench_test.go (94%) rename {libs => internal}/pubsub/query/query.go (99%) rename {libs => internal}/pubsub/query/query_test.go (97%) rename {libs => internal}/pubsub/query/syntax/doc.go (100%) rename {libs => internal}/pubsub/query/syntax/parser.go (100%) rename {libs => internal}/pubsub/query/syntax/scanner.go (100%) rename {libs => internal}/pubsub/query/syntax/syntax_test.go (98%) rename {libs => internal}/pubsub/subindex.go (100%) rename {libs => internal}/pubsub/subscription.go (100%) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 3e54a230b..e06bf4c06 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,6 +27,7 @@ Special thanks to external contributors on this release: - Go API + - [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair) - [libs/sync] \#7450 Internalize and remove the library. (@creachadair) - [libs/async] \#7449 Move library to internal. (@creachadair) - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. 
(@creachadair) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 0a61c95a3..ec031b23b 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -22,13 +22,13 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 8b7b7eb51..887f9fd3e 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -24,12 +24,12 @@ import ( "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 2c848a9f2..b9a14ff50 
100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -15,10 +15,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 56a4924cd..76e020c22 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -28,12 +28,12 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 387650704..92caca7a3 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" - tmpubsub 
"github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go index 3272fe8d7..58f357165 100644 --- a/internal/eventbus/event_bus.go +++ b/internal/eventbus/event_bus.go @@ -6,8 +6,8 @@ import ( "strings" abci "github.com/tendermint/tendermint/abci/types" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index 72b1094fb..66581b6cc 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -12,9 +12,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index a75777741..c4ec3695e 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -17,11 +17,11 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" statemocks 
"github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index cfcddcd44..7ed47962a 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -9,11 +9,11 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" ) diff --git a/libs/pubsub/example_test.go b/internal/pubsub/example_test.go similarity index 87% rename from libs/pubsub/example_test.go rename to internal/pubsub/example_test.go index 0cf7b5853..39b179981 100644 --- a/libs/pubsub/example_test.go +++ b/internal/pubsub/example_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" ) func TestExample(t *testing.T) { diff --git a/libs/pubsub/pubsub.go b/internal/pubsub/pubsub.go similarity index 99% rename from libs/pubsub/pubsub.go rename to internal/pubsub/pubsub.go index cd37a17ee..13526febb 100644 --- a/libs/pubsub/pubsub.go +++ b/internal/pubsub/pubsub.go @@ -5,7 +5,7 @@ // Clients register 
subscriptions with a query to select which messages they // wish to receive. When messages are published, they are broadcast to all // clients whose subscription query matches that message. Queries are -// constructed using the github.com/tendermint/tendermint/libs/pubsub/query +// constructed using the github.com/tendermint/tendermint/internal/pubsub/query // package. // // Example: diff --git a/libs/pubsub/pubsub_test.go b/internal/pubsub/pubsub_test.go similarity index 99% rename from libs/pubsub/pubsub_test.go rename to internal/pubsub/pubsub_test.go index 601e48823..c4da551d0 100644 --- a/libs/pubsub/pubsub_test.go +++ b/internal/pubsub/pubsub_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" ) const ( diff --git a/libs/pubsub/query/bench_test.go b/internal/pubsub/query/bench_test.go similarity index 94% rename from libs/pubsub/query/bench_test.go rename to internal/pubsub/query/bench_test.go index 894c16628..28f5184ab 100644 --- a/libs/pubsub/query/bench_test.go +++ b/internal/pubsub/query/bench_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" ) const testQuery = `tm.events.type='NewBlock' AND abci.account.name='Igor'` diff --git a/libs/pubsub/query/query.go b/internal/pubsub/query/query.go similarity index 99% rename from libs/pubsub/query/query.go rename to internal/pubsub/query/query.go index e874f037c..ce70238a5 100644 --- a/libs/pubsub/query/query.go +++ b/internal/pubsub/query/query.go @@ -20,7 +20,7 @@ import ( "time" "github.com/tendermint/tendermint/abci/types" - 
"github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) // All is a query that matches all events. diff --git a/libs/pubsub/query/query_test.go b/internal/pubsub/query/query_test.go similarity index 97% rename from libs/pubsub/query/query_test.go rename to internal/pubsub/query/query_test.go index b0d1fb7fe..ddecae0ad 100644 --- a/libs/pubsub/query/query_test.go +++ b/internal/pubsub/query/query_test.go @@ -7,9 +7,9 @@ import ( "time" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) var _ pubsub.Query = (*query.Query)(nil) diff --git a/libs/pubsub/query/syntax/doc.go b/internal/pubsub/query/syntax/doc.go similarity index 100% rename from libs/pubsub/query/syntax/doc.go rename to internal/pubsub/query/syntax/doc.go diff --git a/libs/pubsub/query/syntax/parser.go b/internal/pubsub/query/syntax/parser.go similarity index 100% rename from libs/pubsub/query/syntax/parser.go rename to internal/pubsub/query/syntax/parser.go diff --git a/libs/pubsub/query/syntax/scanner.go b/internal/pubsub/query/syntax/scanner.go similarity index 100% rename from libs/pubsub/query/syntax/scanner.go rename to internal/pubsub/query/syntax/scanner.go diff --git a/libs/pubsub/query/syntax/syntax_test.go b/internal/pubsub/query/syntax/syntax_test.go similarity index 98% rename from libs/pubsub/query/syntax/syntax_test.go rename to internal/pubsub/query/syntax/syntax_test.go index ac95fd8b1..ac0473beb 100644 --- a/libs/pubsub/query/syntax/syntax_test.go +++ b/internal/pubsub/query/syntax/syntax_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - 
"github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) func TestScanner(t *testing.T) { diff --git a/libs/pubsub/subindex.go b/internal/pubsub/subindex.go similarity index 100% rename from libs/pubsub/subindex.go rename to internal/pubsub/subindex.go diff --git a/libs/pubsub/subscription.go b/internal/pubsub/subscription.go similarity index 100% rename from libs/pubsub/subscription.go rename to internal/pubsub/subscription.go diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 2e7f24726..725a2f972 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -4,10 +4,10 @@ import ( "fmt" "sort" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index 73ca8a755..965291cdb 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 7ba2bf90c..98fedc10a 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -5,10 +5,10 @@ import ( "fmt" "sort" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" 
"github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 4f68bd016..32b26b425 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -19,12 +19,12 @@ import ( "github.com/tendermint/tendermint/internal/eventbus" mmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index 26fdcf1fc..2ac133bf1 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,9 +12,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/block/kv/kv_test.go 
b/internal/state/indexer/block/kv/kv_test.go index 650723dbf..b7970a177 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -9,8 +9,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go index fff88046c..28e22718c 100644 --- a/internal/state/indexer/block/kv/util.go +++ b/internal/state/indexer/block/kv/util.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/orderedcode" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go index 9b28d93bb..7d5453848 100644 --- a/internal/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go index dba50b6af..9b4d6f561 100644 --- a/internal/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -4,7 +4,7 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/indexer.go b/internal/state/indexer/indexer.go 
index 5c238e137..a1b78a257 100644 --- a/internal/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -5,7 +5,7 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index 83810953b..f1bf763b2 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -5,8 +5,8 @@ import ( "time" "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 98b32e935..d5555a417 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" diff --git a/internal/state/indexer/query_range.go b/internal/state/indexer/query_range.go index 4c026955d..ff54cd32b 100644 --- a/internal/state/indexer/query_range.go +++ b/internal/state/indexer/query_range.go @@ -3,7 +3,7 @@ package indexer import ( "time" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) // QueryRanges defines a mapping between a composite event key and a QueryRange. 
diff --git a/internal/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go index fe7068a1b..10282fd34 100644 --- a/internal/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -6,10 +6,10 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index 47b1f5364..8955550d0 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go index f58142f21..c436bdf0f 100644 --- a/internal/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index 18e95b97d..1208bca19 100644 
--- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -11,8 +11,8 @@ import ( "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go index 4bcff958b..e543959ea 100644 --- a/internal/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,9 +12,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" indexer "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index b8f72b2fa..7744c3183 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -10,7 +10,7 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index e65f9ca2d..018fe51b4 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" 
"github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go index 0da7fc683..dea5d570f 100644 --- a/internal/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/internal/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go index b8a8fc464..97e3aff76 100644 --- a/internal/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 60d542b7c..444eefb88 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -6,8 +6,8 @@ import ( "net" "net/http" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/light" lrpc "github.com/tendermint/tendermint/light/rpc" rpchttp "github.com/tendermint/tendermint/rpc/client/http" diff --git a/node/node.go b/node/node.go index 9878f60e5..5de408945 100644 --- a/node/node.go +++ b/node/node.go @@ -21,6 +21,7 @@ import ( "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" 
"github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" rpccore "github.com/tendermint/tendermint/internal/rpc/core" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" @@ -28,7 +29,6 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" diff --git a/node/node_test.go b/node/node_test.go index 666192a5c..ad3980df6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -24,12 +24,12 @@ import ( "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index 320540450..ceda36c3c 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -8,8 +8,8 @@ import ( "sync" "time" + "github.com/tendermint/tendermint/internal/pubsub" tmjson "github.com/tendermint/tendermint/libs/json" - "github.com/tendermint/tendermint/libs/pubsub" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient 
"github.com/tendermint/tendermint/rpc/jsonrpc/client" diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 799639a04..4a833af9c 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -7,11 +7,11 @@ import ( "time" "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" diff --git a/types/events.go b/types/events.go index 86935ba25..00bed4b60 100644 --- a/types/events.go +++ b/types/events.go @@ -6,9 +6,9 @@ import ( "strings" abci "github.com/tendermint/tendermint/abci/types" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" tmjson "github.com/tendermint/tendermint/libs/json" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) // Reserved event types (alphabetically sorted). From 7705c9d0865ca97c7e0fd16e2fa26245c946e11e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Dec 2021 16:26:20 +0000 Subject: [PATCH 33/33] build(deps): Bump google.golang.org/grpc from 1.42.0 to 1.43.0 (#7455) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.42.0 to 1.43.0.
Release notes

Sourced from google.golang.org/grpc's releases.

Release 1.43.0

API Changes

Behavior Changes

  • status: support wrapped errors in FromContextError (#4977)
  • config: remove the environment variable to disable retry support (#4922)

New Features

  • balancer: new field Authority in BuildOptions for server name to use in the authentication handshake with a remote load balancer (#4969)

Bug Fixes

  • xds/resolver: fix possible ClientConn leak upon resolver initialization failure (#4900)
  • client: fix nil panic in rare race conditions with the pick first LB policy (#4971)
  • xds: improve RPC error messages when xDS connection errors occur (#5032, #5054)
  • transport: do not create stream object in the face of illegal stream IDs (#4873)

Documentation

  • client: clarify errors to indicate whether compressed or uncompressed messages exceeded size limits (#4918)
Commits
  • 14c1138 Change version to 1.43.0 (#5039)
  • ae29ac3 xds/client: send NewStream errors to the watchers (#5032)
  • 296afc2 transport: better error message when per-RPC creds fail (#5033)
  • e15d978 xds/client: send connection errors to all watchers (#5054)
  • 46e883a Backport "xds/c2p: replace C2P resolver env var with experimental scheme suff...
  • 3786ae1 xds/resolver: Add support for cluster specifier plugins (#4987)
  • 512e894 rls: support extra_keys and constant_keys (#4995)
  • f3bbd12 xds/bootstrap_config: add a string function to server config (#5031)
  • 46935b9 fix possible nil before casting (#5017)
  • c2bccd0 xds/kokoro: install go 1.17, and retry go build (#5015)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/grpc&package-manager=go_modules&previous-version=1.42.0&new-version=1.43.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- abci/client/grpc_client.go | 6 +++++- abci/example/example_test.go | 6 +++++- go.mod | 4 ++-- go.sum | 3 ++- privval/grpc/client_test.go | 18 ++++++++++++++---- privval/grpc/util.go | 3 ++- 6 files changed, 30 insertions(+), 10 deletions(-) diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index ee35646f9..7d3351f7e 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -8,6 +8,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" @@ -105,7 +106,10 @@ func (cli *grpcClient) OnStart(ctx context.Context) error { RETRY_LOOP: for { - conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(cli.addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialerFunc), + ) if err != nil { if cli.mustConnect { return err diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 80d5a3130..f32d9df7c 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -142,7 +143,10 @@ func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationSe t.Cleanup(func() { server.Wait() }) // Connect to the socket - conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(socket, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialerFunc), + ) if err != nil { t.Fatalf("Error dialing GRPC server: %v", err.Error()) } diff --git a/go.mod b/go.mod index df894ee4f..457a37a22 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,8 @@ require ( golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519 golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.42.0 + google.golang.org/grpc v1.43.0 + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) @@ -195,7 +196,6 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/protobuf v1.27.1 // indirect - gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect diff --git a/go.sum b/go.sum index 8c303dca0..0ada1103c 100644 --- a/go.sum +++ b/go.sum @@ -1612,8 +1612,9 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 98730df19..939a1989f 100644 --- a/privval/grpc/client_test.go +++ 
b/privval/grpc/client_test.go @@ -6,7 +6,8 @@ import ( "testing" "time" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" "github.com/stretchr/testify/assert" @@ -51,7 +52,10 @@ func TestSignerClient_GetPubKey(t *testing.T) { srv, dialer := dialer(mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) if err != nil { panic(err) } @@ -73,7 +77,10 @@ func TestSignerClient_SignVote(t *testing.T) { srv, dialer := dialer(mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) if err != nil { panic(err) } @@ -126,7 +133,10 @@ func TestSignerClient_SignProposal(t *testing.T) { srv, dialer := dialer(mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) if err != nil { panic(err) } diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 7e0483f9c..a73fd65b1 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -15,6 +15,7 @@ import ( tmnet "github.com/tendermint/tendermint/libs/net" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -97,7 +98,7 @@ func DialRemoteSigner( transportSecurity = GenerateTLS(cfg.ClientCertificateFile(), cfg.ClientKeyFile(), cfg.RootCAFile(), logger) } else { - transportSecurity = grpc.WithInsecure() + transportSecurity 
= grpc.WithTransportCredentials(insecure.NewCredentials()) logger.Info("Using an insecure gRPC connection!") }