mempool: disable MaxBatchBytes (#5800)
@p4u from vocdoni.io reported that the mempool might behave incorrectly under high load. The consequences range from pauses between blocks to peers disconnecting from this node. My current theory is that the flowrate lib we use for flow control (multiplexing over a single TCP connection) was not designed with large blobs (a 1MB batch of txs) in mind. I tried decreasing the mempool reactor priority, but that had no visible effect. What actually worked was adding a time.Sleep into mempool.Reactor#broadcastTxRoutine after each successful send, i.e. a manual form of flow control. As a temporary remedy (until the mempool package is refactored), max-batch-bytes is disabled and transactions are sent one by one, without batching.

Closes #5796
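The time.Sleep experiment mentioned above is not part of this change, so purely for illustration: a minimal, self-contained sketch of that manual flow control, pausing after every successful send so a burst of txs cannot saturate the shared connection. The sendTx stand-in, the tx sizes, and the 10ms interval are assumptions, not the reactor's actual code (which calls peer.Send(MempoolChannel, bz)).

package main

import (
	"fmt"
	"time"
)

// peerSendInterval is a hypothetical pause between sends; the commit message
// does not say which value was tried.
const peerSendInterval = 10 * time.Millisecond

// sendTx stands in for peer.Send(MempoolChannel, bz); it would return false
// if the peer's send queue were full.
func sendTx(tx []byte) bool {
	fmt.Printf("sent %d bytes\n", len(tx))
	return true
}

func main() {
	txs := [][]byte{make([]byte, 512), make([]byte, 1024), make([]byte, 2048)}
	for _, tx := range txs {
		if sendTx(tx) {
			// Manual flow control: sleep after each successful send instead of
			// relying solely on the connection's flowrate-based throttling.
			time.Sleep(peerSendInterval)
		}
	}
}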
mempool/reactor.go

@@ -134,12 +134,18 @@ func (memR *Reactor) OnStart() error {
 // GetChannels implements Reactor by returning the list of channels for this
 // reactor.
 func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
-    maxMsgSize := memR.config.MaxBatchBytes
+    largestTx := make([]byte, memR.config.MaxTxBytes)
+    batchMsg := protomem.Message{
+        Sum: &protomem.Message_Txs{
+            Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
+        },
+    }
+
     return []*p2p.ChannelDescriptor{
         {
             ID:                  MempoolChannel,
             Priority:            5,
-            RecvMessageCapacity: maxMsgSize,
+            RecvMessageCapacity: batchMsg.Size(),
         },
     }
 }
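As a rough cross-check of the new RecvMessageCapacity: wrapping one max-size tx in the Message/Txs envelope adds only a few bytes of protobuf tag and length-prefix overhead on top of MaxTxBytes. The sketch below estimates that size with plain varint arithmetic; the field numbers (assumed to be 1 for both the inner txs field and the outer oneof) and the 1 MiB default for MaxTxBytes are assumptions, so it approximates batchMsg.Size() rather than reimplementing it.

package main

import "fmt"

// sizeVarint returns the number of bytes needed to varint-encode v.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// lenPrefixedField is one tag byte (field number < 16) plus the varint
// length prefix plus the payload itself.
func lenPrefixedField(payload int) int {
	return 1 + sizeVarint(uint64(payload)) + payload
}

func main() {
	const maxTxBytes = 1048576 // assumed default MaxTxBytes (1 MiB)

	inner := lenPrefixedField(maxTxBytes) // Txs{Txs: [][]byte{largestTx}}
	outer := lenPrefixedField(inner)      // Message{Sum: &Message_Txs{...}}
	fmt.Printf("capacity ~ %d bytes (%d bytes of proto overhead)\n", outer, outer-maxTxBytes)

	// The same arithmetic applied to the old test below gives 1+2+1018 = 1021
	// and then 1+2+1021 = 1024, matching its pairing of a 1018-byte tx with
	// MaxBatchBytes = 1024.
}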
@@ -232,20 +238,19 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
             continue
         }

-        txs := memR.txs(next, peerID, peerState.GetHeight()) // WARNING: mutates next!
+        // NOTE: Transaction batching was disabled due to
+        // https://github.com/tendermint/tendermint/issues/5796

-        // send txs
-        if len(txs) > 0 {
+        if _, ok := memTx.senders.Load(peerID); !ok {
             msg := protomem.Message{
                 Sum: &protomem.Message_Txs{
-                    Txs: &protomem.Txs{Txs: txs},
+                    Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
                 },
             }
             bz, err := msg.Marshal()
             if err != nil {
                 panic(err)
             }
-            memR.Logger.Debug("Sending N txs to peer", "N", len(txs), "peer", peer)
             success := peer.Send(MempoolChannel, bz)
             if !success {
                 time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
@@ -265,37 +270,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
     }
 }

-// txs iterates over the transaction list and builds a batch of txs. next is
-// included.
-// WARNING: mutates next!
-func (memR *Reactor) txs(next *clist.CElement, peerID uint16, peerHeight int64) [][]byte {
-    batch := make([][]byte, 0)
-
-    for {
-        memTx := next.Value.(*mempoolTx)
-
-        if _, ok := memTx.senders.Load(peerID); !ok {
-            // If current batch + this tx size is greater than max => return.
-            batchMsg := protomem.Message{
-                Sum: &protomem.Message_Txs{
-                    Txs: &protomem.Txs{Txs: append(batch, memTx.tx)},
-                },
-            }
-            if batchMsg.Size() > memR.config.MaxBatchBytes {
-                return batch
-            }
-
-            batch = append(batch, memTx.tx)
-        }
-
-        n := next.Next()
-        if n == nil {
-            return batch
-        }
-        next = n
-    }
-}
-
 //-----------------------------------------------------------------------------
 // Messages
mempool/reactor_test.go

@@ -149,9 +149,8 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
     ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
 }

-func TestReactor_MaxBatchBytes(t *testing.T) {
+func TestReactor_MaxTxBytes(t *testing.T) {
     config := cfg.TestConfig()
-    config.Mempool.MaxBatchBytes = 1024

     const N = 2
     reactors := makeAndConnectReactors(config, N)
@@ -168,9 +167,9 @@ func TestReactor_MaxBatchBytes(t *testing.T) {
         }
     }

-    // Broadcast a tx, which has the max size (minus proto overhead)
+    // Broadcast a tx, which has the max size
     // => ensure it's received by the second reactor.
-    tx1 := tmrand.Bytes(1018)
+    tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
     err := reactors[0].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID})
     require.NoError(t, err)
     waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)
@@ -180,13 +179,9 @@ func TestReactor_MaxBatchBytes(t *testing.T) {

     // Broadcast a tx, which is beyond the max size
     // => ensure it's not sent
-    tx2 := tmrand.Bytes(1020)
+    tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
     err = reactors[0].mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID})
-    require.NoError(t, err)
-    ensureNoTxs(t, reactors[1], 100*time.Millisecond)
-    // => ensure the second reactor did not disconnect from us
-    out, in, _ := reactors[1].Switch.NumPeers()
-    assert.Equal(t, 1, out+in)
+    require.Error(t, err)
 }

 func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {