Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-07 05:46:32 +00:00
## Description
Refs #2659

Breaking changes in the mempool package:
* [mempool] #2659 `Mempool` is now an interface
  * old `Mempool` renamed to `CListMempool`
  * `NewMempool` renamed to `NewCListMempool`
  * `Option` renamed to `CListOption`
  * `MempoolReactor` renamed to `Reactor`
  * `NewMempoolReactor` renamed to `NewReactor`
  * unexpose `TxID` method
  * `TxInfo.PeerID` renamed to `TxInfo.SenderID`
  * unexpose `MempoolReactor.Mempool`

Breaking changes in the state package:
* [state] #2659 `Mempool` interface moved to the mempool package
  * `MockMempool` moved to the top-level mock package and renamed to `Mempool`

Non-breaking changes in the node package:
* [node] #2659 Add `Mempool` method, which allows you to access the mempool
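
Since `Mempool` is now an interface, downstream code can depend on it instead of the concrete `*CListMempool`. Below is a minimal sketch of what calling code might look like after this change; `submitTx` is a hypothetical helper, and the `CheckTx` signature is assumed to mirror the `CheckTxWithInfo` call visible in the reactor source further down:

```go
import (
	abci "github.com/tendermint/tendermint/abci/types"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// submitTx is a hypothetical helper: it accepts any mempool.Mempool
// implementation (e.g. the renamed *CListMempool, or the Mempool mock
// from the new top-level mock package) instead of a concrete type.
func submitTx(mp mempl.Mempool, tx types.Tx) error {
	// A nil callback means we don't inspect the CheckTx response here.
	var cb func(res *abci.Response)
	return mp.CheckTx(tx, cb)
}
```

Because the node package now exposes a `Mempool` method, an embedding application can obtain such an interface value directly from a running node.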
## Commits
* move Mempool interface into mempool package

  Refs #2659

  Breaking changes in the mempool package:
  - Mempool now an interface
  - old Mempool renamed to CListMempool

  Breaking changes to state package:
  - MockMempool moved to mempool/mock package and renamed to Mempool
  - Mempool interface moved to mempool package
* assert CListMempool impl Mempool
* gofmt code
* rename MempoolReactor to Reactor
  - combine everything into one interface
  - rename TxInfo.PeerID to TxInfo.SenderID
  - unexpose MempoolReactor.Mempool
* move mempool mock into top-level mock package
* add a fixme

  TxsFront should not be a part of the Mempool interface because it leaks
  implementation details. Instead, we need to come up with a general
  interface for querying the mempool so the MempoolReactor can fetch and
  broadcast txs to peers (see the illustrative sketch after this list).
* change node#Mempool to return interface
* save commit = new reactor arch
* Revert "save commit = new reactor arch"
This reverts commit 1bfceacd9d.
* require CListMempool in mempool.Reactor
* add two changelog entries
* fixes after my own review
* quote interfaces, structs and functions
* fixes after Ismail's review
* make node's mempool an interface
* make InitWAL/CloseWAL methods a part of Mempool interface
* fix merge conflicts
* make node's mempool an interface
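
The fixme commit above names a design gap without proposing an API. Purely as an illustration of the direction it describes (nothing below is part of this PR, which keeps `TxsFront`/`TxsWaitChan` on `CListMempool`), a query interface decoupled from clist internals might look like:

```go
import "github.com/tendermint/tendermint/types"

// txSource is a hypothetical iteration interface, sketched only to
// illustrate the fixme above; it is not defined anywhere in this PR.
type txSource interface {
	// TxsAvailable fires when at least one tx is ready to be fetched.
	TxsAvailable() <-chan struct{}
	// NextTxs returns up to max txs to gossip to a peer.
	NextTxs(max int) []types.Tx
}
```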
## mempool/reactor.go (285 lines, 7.7 KiB, Go)
package mempool

import (
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	amino "github.com/tendermint/go-amino"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

const (
	// MempoolChannel is the p2p channel used by the mempool reactor.
	MempoolChannel = byte(0x30)

	maxMsgSize = 1048576          // 1MB TODO make it configurable
	maxTxSize  = maxMsgSize - 8   // account for amino overhead of TxMessage

	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount

	// UnknownPeerID is the peer ID to use when running CheckTx when there is
	// no peer (e.g. RPC)
	UnknownPeerID uint16 = 0

	maxActiveIDs = math.MaxUint16
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs to the
// peers they were received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       sync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
	return memR
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:       MempoolChannel,
			Priority: 5,
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	memR.ids.ReserveForPeer(peer)
	go memR.broadcastTxRoutine(peer)
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	switch msg := msg.(type) {
	case *TxMessage:
		peerID := memR.ids.GetForPeer(src)
		err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID})
		if err != nil {
			memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
		}
		// broadcasting happens from go routines per peer
	default:
		memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
	}
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}

// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	if !memR.config.Broadcast {
		return
	}

	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement
	for {
		// In case next.NextWaitChan() and peer.Quit() fire at the same time,
		// ensure the reactor and the peer are still running before proceeding.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		memTx := next.Value.(*mempoolTx)

		// make sure the peer is up to date
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}
		if peerState.GetHeight() < memTx.Height()-1 { // Allow for a lag of 1 block
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// ensure peer hasn't already sent us this tx
		if _, ok := memTx.senders.Load(peerID); !ok {
			// send memTx
			msg := &TxMessage{Tx: memTx.tx}
			success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// MempoolMessage is a message sent or received by the Reactor.
type MempoolMessage interface{}

// RegisterMempoolMessages registers the mempool messages with the given amino codec.
func RegisterMempoolMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*MempoolMessage)(nil), nil)
	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
}

// decodeMsg unmarshals a mempool message, rejecting anything over maxMsgSize.
func decodeMsg(bz []byte) (msg MempoolMessage, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

//-------------------------------------

// TxMessage is a MempoolMessage containing a transaction.
type TxMessage struct {
	Tx types.Tx
}

// String returns a string representation of the TxMessage.
func (m *TxMessage) String() string {
	return fmt.Sprintf("[TxMessage %v]", m.Tx)
}
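
The package-level `cdc` used by `broadcastTxRoutine` and `decodeMsg` is not defined in this file; in the tendermint source it lives in a sibling codec.go. A sketch of that wiring, for completeness (likewise, `mempoolTx`, `txID`, and `TxInfo` come from the `CListMempool` implementation elsewhere in the package):

```go
package mempool

import (
	amino "github.com/tendermint/go-amino"
)

// cdc is the package-level amino codec used by the reactor for encoding
// and decoding mempool messages.
var cdc = amino.NewCodec()

func init() {
	RegisterMempoolMessages(cdc)
}
```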